/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/kvm_para.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT			(1 <<  0)
#define SVM_FEATURE_LBRV		(1 <<  1)
#define SVM_FEATURE_SVML		(1 <<  2)
#define SVM_FEATURE_NRIP		(1 <<  3)
#define SVM_FEATURE_TSC_RATE		(1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN		(1 <<  5)
#define SVM_FEATURE_FLUSH_ASID		(1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST	(1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER	(1 << 10)

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD		0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	u64 tsc_ratio;
};

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID		0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,			.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true  },
	{ .index = MSR_FS_BASE,			.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
	{ .index = MSR_LSTAR,			.always = true  },
	{ .index = MSR_CSTAR,			.always = true  },
	{ .index = MSR_SYSCALL_MASK,		.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_INVALID,			.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);
static u64 __scale_tsc(u64 ratio, u64 tsc);

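/*
 * VMCB "clean" bits: on hardware with the VMCB_CLEAN feature, a set bit
 * tells the CPU that the corresponding group of VMCB fields was not
 * touched since the last VMRUN and may be read from its internal cache.
 * Every software write to a cached field must therefore be paired with
 * a mark_dirty() call for its group; getting this wrong can make the
 * hardware run the guest with stale state.
 */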
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

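/*
 * While the vcpu runs in guest (nested) mode, the active VMCB must trap
 * everything that either the host *or* the L1 hypervisor wants trapped,
 * so the effective intercept masks are the union of both sets.
 */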
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

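/*
 * The MSR permission map encodes two intercept bits (read, write) per
 * MSR, i.e. four MSRs per byte, in three 2K regions covering the MSR
 * ranges that start at 0, 0xc0000000 and 0xc0010000.  Worked example:
 * MSR_LSTAR (0xc0000082) lies in the second range, so its byte offset
 * is (0x82 / 4) + 0x800 = 0x820, which this helper returns as the u32
 * index 0x820 / 4 = 0x208.
 */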
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret & mask;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

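/*
 * On hardware with the NRIP-save feature, control.next_rip already holds
 * the address of the instruction following the one that exited, so the
 * skip is a simple RIP write; otherwise the instruction has to be run
 * through the emulator just to determine its length.
 */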
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0)
		svm->next_rip = svm->vmcb->control.next_rip;

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the nested
	 * hypervisor handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!cpu_has_amd_erratum(amd_erratum_383))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	native_store_gdt(&gdt_descr);
	gdt = (struct desc_struct *)gdt_descr.address;
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;
}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

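/*
 * Each u32 of the permission map covers 16 MSRs (two bits per MSR).
 * Within that word, bit 2*(msr & 0x0f) intercepts reads of the MSR and
 * the following bit intercepts writes; a cleared bit grants the guest
 * direct, un-intercepted access.
 */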
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

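/*
 * msrpm_offsets[] records every permission-map u32 that KVM ever
 * modifies, so that code merging the nested MSR permission map (not
 * shown in this excerpt) can visit only these offsets instead of
 * scanning the whole 8K map.
 */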
static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

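/*
 * With lbr_ctl set, the hardware context-switches the LBR MSRs between
 * host and guest on VMRUN/#VMEXIT, so the guest can be given direct
 * access to them; when LBR virtualization is off, the intercepts are
 * restored so stale host values never leak through.
 */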
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 max;

		kvm_has_tsc_control = true;

		/*
		 * Make sure the user can only configure tsc_khz values that
		 * fit into a signed integer.  A minimum value need not be
		 * calculated because it will always be 1 on all machines,
		 * and a value of 0 is used to disable tsc-scaling for the
		 * vcpu.
		 */
		max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));

		kvm_max_guest_tsc_khz = max;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

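/*
 * Multiply a 64-bit TSC value by a fixed-point ratio whose low 32 bits
 * are the fractional part (the TSC_RATIO MSR format).  A plain 64x64
 * multiply could overflow, so the product is assembled from partial
 * products:  tsc * int(ratio)  +  high32(tsc) * frac(ratio)  +
 * (low32(tsc) * frac(ratio)) >> 32.
 */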
static u64 __scale_tsc(u64 ratio, u64 tsc)
{
	u64 mult, frac, _tsc;

	mult = ratio >> 32;
	frac = ratio & ((1ULL << 32) - 1);

	_tsc  = tsc;
	_tsc *= mult;
	_tsc += (tsc >> 32) * frac;
	_tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;

	return _tsc;
}

static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 _tsc = tsc;

	if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
		_tsc = __scale_tsc(svm->tsc_ratio, tsc);

	return _tsc;
}

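/*
 * Example: a guest configured for 2600000 kHz on a 2000000 kHz host
 * yields ratio = (2600000 << 32) / 2000000, i.e. 1.3 in the fixed-point
 * format the TSC_RATIO MSR expects.
 */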
static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 ratio;
	u64 khz;

	/* Guest TSC same frequency as host TSC? */
	if (!scale) {
		svm->tsc_ratio = TSC_RATIO_DEFAULT;
		return;
	}

	/* TSC scaling supported? */
	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		if (user_tsc_khz > tsc_khz) {
			vcpu->arch.tsc_catchup = 1;
			vcpu->arch.tsc_always_catchup = 1;
		} else
			WARN(1, "user requested TSC rate below hardware speed\n");
		return;
	}

	khz = user_tsc_khz;

	/* TSC scaling required - calculate ratio */
	ratio = khz << 32;
	do_div(ratio, tsc_khz);

	if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
		WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
			  user_tsc_khz);
		return;
	}
	svm->tsc_ratio = ratio;
}

static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->vmcb->control.tsc_offset;
}

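/*
 * While nested, vmcb->control.tsc_offset is the sum of the host-visible
 * offset and the offset the L1 hypervisor programmed for its guest, so
 * writes must preserve that delta (g_tsc_offset below).
 */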
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10001020static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1021{
1022 struct vcpu_svm *svm = to_svm(vcpu);
1023 u64 g_tsc_offset = 0;
1024
Joerg Roedel20307532010-11-29 17:51:48 +01001025 if (is_guest_mode(vcpu)) {
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10001026 g_tsc_offset = svm->vmcb->control.tsc_offset -
1027 svm->nested.hsave->control.tsc_offset;
1028 svm->nested.hsave->control.tsc_offset = offset;
1029 }
1030
1031 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
Joerg Roedel116a0a22010-12-03 11:45:49 +01001032
1033 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10001034}
1035
Marcelo Tosattif1e2b262012-02-03 15:43:55 -02001036static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
Zachary Amsdene48672f2010-08-19 22:07:23 -10001037{
1038 struct vcpu_svm *svm = to_svm(vcpu);
1039
Marcelo Tosattif1e2b262012-02-03 15:43:55 -02001040 WARN_ON(adjustment < 0);
1041 if (host)
1042 adjustment = svm_scale_tsc(vcpu, adjustment);
1043
Zachary Amsdene48672f2010-08-19 22:07:23 -10001044 svm->vmcb->control.tsc_offset += adjustment;
Joerg Roedel20307532010-11-29 17:51:48 +01001045 if (is_guest_mode(vcpu))
Zachary Amsdene48672f2010-08-19 22:07:23 -10001046 svm->nested.hsave->control.tsc_offset += adjustment;
Joerg Roedel116a0a22010-12-03 11:45:49 +01001047 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
Zachary Amsdene48672f2010-08-19 22:07:23 -10001048}
1049
Joerg Roedel857e4092011-03-25 09:44:50 +01001050static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1051{
1052 u64 tsc;
1053
1054 tsc = svm_scale_tsc(vcpu, native_read_tsc());
1055
1056 return target_tsc - tsc;
1057}
1058
Joerg Roedele6101a92008-02-13 18:58:45 +01001059static void init_vmcb(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001060{
Joerg Roedele6101a92008-02-13 18:58:45 +01001061 struct vmcb_control_area *control = &svm->vmcb->control;
1062 struct vmcb_save_area *save = &svm->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001063
Avi Kivitybff78272010-01-07 13:16:08 +02001064 svm->vcpu.fpu_active = 1;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001065 svm->vcpu.arch.hflags = 0;
Avi Kivitybff78272010-01-07 13:16:08 +02001066
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001067 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1068 set_cr_intercept(svm, INTERCEPT_CR3_READ);
1069 set_cr_intercept(svm, INTERCEPT_CR4_READ);
1070 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1071 set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1072 set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1073 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001074
Joerg Roedel3aed0412010-11-30 18:03:58 +01001075 set_dr_intercept(svm, INTERCEPT_DR0_READ);
1076 set_dr_intercept(svm, INTERCEPT_DR1_READ);
1077 set_dr_intercept(svm, INTERCEPT_DR2_READ);
1078 set_dr_intercept(svm, INTERCEPT_DR3_READ);
1079 set_dr_intercept(svm, INTERCEPT_DR4_READ);
1080 set_dr_intercept(svm, INTERCEPT_DR5_READ);
1081 set_dr_intercept(svm, INTERCEPT_DR6_READ);
1082 set_dr_intercept(svm, INTERCEPT_DR7_READ);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001083
Joerg Roedel3aed0412010-11-30 18:03:58 +01001084 set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
1085 set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
1086 set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
1087 set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
1088 set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
1089 set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
1090 set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
1091 set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001092
Joerg Roedel18c918c2010-11-30 18:03:59 +01001093 set_exception_intercept(svm, PF_VECTOR);
1094 set_exception_intercept(svm, UD_VECTOR);
1095 set_exception_intercept(svm, MC_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001096
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001097 set_intercept(svm, INTERCEPT_INTR);
1098 set_intercept(svm, INTERCEPT_NMI);
1099 set_intercept(svm, INTERCEPT_SMI);
1100 set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
Avi Kivity332b56e2011-11-10 14:57:24 +02001101 set_intercept(svm, INTERCEPT_RDPMC);
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001102 set_intercept(svm, INTERCEPT_CPUID);
1103 set_intercept(svm, INTERCEPT_INVD);
1104 set_intercept(svm, INTERCEPT_HLT);
1105 set_intercept(svm, INTERCEPT_INVLPG);
1106 set_intercept(svm, INTERCEPT_INVLPGA);
1107 set_intercept(svm, INTERCEPT_IOIO_PROT);
1108 set_intercept(svm, INTERCEPT_MSR_PROT);
1109 set_intercept(svm, INTERCEPT_TASK_SWITCH);
1110 set_intercept(svm, INTERCEPT_SHUTDOWN);
1111 set_intercept(svm, INTERCEPT_VMRUN);
1112 set_intercept(svm, INTERCEPT_VMMCALL);
1113 set_intercept(svm, INTERCEPT_VMLOAD);
1114 set_intercept(svm, INTERCEPT_VMSAVE);
1115 set_intercept(svm, INTERCEPT_STGI);
1116 set_intercept(svm, INTERCEPT_CLGI);
1117 set_intercept(svm, INTERCEPT_SKINIT);
1118 set_intercept(svm, INTERCEPT_WBINVD);
1119 set_intercept(svm, INTERCEPT_MONITOR);
1120 set_intercept(svm, INTERCEPT_MWAIT);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01001121 set_intercept(svm, INTERCEPT_XSETBV);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001122
1123 control->iopm_base_pa = iopm_base;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001124 control->msrpm_base_pa = __pa(svm->msrpm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001125 control->int_ctl = V_INTR_MASKING_MASK;
1126
1127 init_seg(&save->es);
1128 init_seg(&save->ss);
1129 init_seg(&save->ds);
1130 init_seg(&save->fs);
1131 init_seg(&save->gs);
1132
1133 save->cs.selector = 0xf000;
1134 /* Executable/Readable Code Segment */
1135 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1136 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1137 save->cs.limit = 0xffff;
Avi Kivityd92899a2007-02-12 00:54:38 -08001138 /*
1139 * cs.base should really be 0xffff0000, but vmx can't handle that, so
1140 * be consistent with it.
1141 *
1142 * Replace when we have real mode working for vmx.
1143 */
1144 save->cs.base = 0xf0000;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001145
1146 save->gdtr.limit = 0xffff;
1147 save->idtr.limit = 0xffff;
1148
1149 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1150 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1151
Marcelo Tosattieaa48512010-08-31 19:13:14 -03001152 svm_set_efer(&svm->vcpu, 0);
Mike Dayd77c26f2007-10-08 09:02:08 -04001153 save->dr6 = 0xffff0ff0;
Avi Kivityf6e78472010-08-02 15:30:20 +03001154 kvm_set_rflags(&svm->vcpu, 2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001155 save->rip = 0x0000fff0;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001156 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001157
Joerg Roedele0231712010-02-24 18:59:10 +01001158 /*
1159 * This is the guest-visible cr0 value.
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001160 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
Avi Kivity6aa8b732006-12-10 02:21:36 -08001161 */
Marcelo Tosatti678041a2010-08-31 19:13:13 -03001162 svm->vcpu.arch.cr0 = 0;
1163 (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001164
Rusty Russell66aee912007-07-17 23:34:16 +10001165 save->cr4 = X86_CR4_PAE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001166 /* rdx = ?? */
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001167
1168 if (npt_enabled) {
1169 /* Setup VMCB for Nested Paging */
1170 control->nested_ctl = 1;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001171 clr_intercept(svm, INTERCEPT_INVLPG);
Joerg Roedel18c918c2010-11-30 18:03:59 +01001172 clr_exception_intercept(svm, PF_VECTOR);
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001173 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1174 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001175 save->g_pat = 0x0007040600070406ULL;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001176 save->cr3 = 0;
1177 save->cr4 = 0;
1178 }
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001179 svm->asid_generation = 0;
Alexander Graf1371d902008-11-25 20:17:04 +01001180
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001181 svm->nested.vmcb = 0;
Joerg Roedel2af91942009-08-07 11:49:28 +02001182 svm->vcpu.arch.hflags = 0;
1183
Avi Kivity2a6b20b2010-11-09 16:15:42 +02001184 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
Mark Langsdorf565d0992009-10-06 14:25:02 -05001185 control->pause_filter_count = 3000;
Joerg Roedel8a05a1b2010-11-30 18:04:00 +01001186 set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001187 }
1188
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001189 mark_all_dirty(svm->vmcb);
1190
Joerg Roedel2af91942009-08-07 11:49:28 +02001191 enable_gif(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001192}
1193
Jan Kiszka57f252f2013-03-12 10:20:24 +01001194static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001195{
1196 struct vcpu_svm *svm = to_svm(vcpu);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001197 u32 dummy;
1198 u32 eax = 1;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001199
Joerg Roedele6101a92008-02-13 18:58:45 +01001200 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001201
Gleb Natapovc5af89b2009-06-09 15:56:26 +03001202 if (!kvm_vcpu_is_bsp(vcpu)) {
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001203 kvm_rip_write(vcpu, 0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001204 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
1205 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
Avi Kivity70433382007-11-07 12:57:23 +02001206 }
Julian Stecklina66f7b722012-12-05 15:26:19 +01001207
1208 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
1209 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
Avi Kivity04d2cc72007-09-10 18:10:54 +03001210}
1211
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001212static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001213{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001214 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001215 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001216 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001217 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001218 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001219 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001220
Rusty Russellc16f8622007-07-30 21:12:19 +10001221 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001222 if (!svm) {
1223 err = -ENOMEM;
1224 goto out;
1225 }
1226
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001227 svm->tsc_ratio = TSC_RATIO_DEFAULT;
1228
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001229 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1230 if (err)
1231 goto free_svm;
1232
Joerg Roedelf65c2292008-02-13 18:58:46 +01001233 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001234 page = alloc_page(GFP_KERNEL);
1235 if (!page)
1236 goto uninit;
1237
Joerg Roedelf65c2292008-02-13 18:58:46 +01001238 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1239 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001240 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001241
1242 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1243 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001244 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001245
Alexander Grafb286d5d2008-11-25 20:17:05 +01001246 hsave_page = alloc_page(GFP_KERNEL);
1247 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001248 goto free_page3;
1249
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001250 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01001251
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001252 svm->msrpm = page_address(msrpm_pages);
1253 svm_vcpu_init_msrpm(svm->msrpm);
1254
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001255 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01001256 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01001257
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001258 svm->vmcb = page_address(page);
1259 clear_page(svm->vmcb);
1260 svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
1261 svm->asid_generation = 0;
Joerg Roedele6101a92008-02-13 18:58:45 +01001262 init_vmcb(svm);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001263
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001264 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
Gleb Natapovc5af89b2009-06-09 15:56:26 +03001265 if (kvm_vcpu_is_bsp(&svm->vcpu))
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001266 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001267
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05001268 svm_init_osvw(&svm->vcpu);
1269
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001270 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08001271
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001272free_page3:
1273 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1274free_page2:
1275 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1276free_page1:
1277 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001278uninit:
1279 kvm_vcpu_uninit(&svm->vcpu);
1280free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10001281 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001282out:
1283 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001284}
1285
1286static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1287{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001288 struct vcpu_svm *svm = to_svm(vcpu);
1289
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001290 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01001291 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001292 __free_page(virt_to_page(svm->nested.hsave));
1293 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001294 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10001295 kmem_cache_free(kvm_vcpu_cache, svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001296}
1297
Avi Kivity15ad7142007-07-11 18:17:21 +03001298static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001299{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001300 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03001301 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02001302
Avi Kivity0cc50642007-03-25 12:07:27 +02001303 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03001304 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001305 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02001306 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001307
Avi Kivity82ca2d12010-10-21 12:20:34 +02001308#ifdef CONFIG_X86_64
1309 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1310#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02001311 savesegment(fs, svm->host.fs);
1312 savesegment(gs, svm->host.gs);
1313 svm->host.ldt = kvm_read_ldt();
1314
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001315 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001316 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01001317
1318 if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
1319 svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
1320 __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
1321 wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
1322 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001323}
1324
1325static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1326{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001327 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001328 int i;
1329
Avi Kivitye1beb1d2007-11-18 13:50:24 +02001330 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02001331 kvm_load_ldt(svm->host.ldt);
1332#ifdef CONFIG_X86_64
1333 loadsegment(fs, svm->host.fs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001334 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01001335 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02001336#else
Avi Kivity831ca602011-03-08 16:09:51 +02001337#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02001338 loadsegment(gs, svm->host.gs);
1339#endif
Avi Kivity831ca602011-03-08 16:09:51 +02001340#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03001341 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001342 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001343}
1344
Kevin Wolfea5e97e2012-02-08 14:34:40 +01001345static void svm_update_cpl(struct kvm_vcpu *vcpu)
1346{
1347 struct vcpu_svm *svm = to_svm(vcpu);
1348 int cpl;
1349
1350 if (!is_protmode(vcpu))
1351 cpl = 0;
1352 else if (svm->vmcb->save.rflags & X86_EFLAGS_VM)
1353 cpl = 3;
1354 else
1355 cpl = svm->vmcb->save.cs.selector & 0x3;
1356
1357 svm->vmcb->save.cpl = cpl;
1358}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	unsigned long old_rflags = to_svm(vcpu)->vmcb->save.rflags;

	to_svm(vcpu)->vmcb->save.rflags = rflags;
	if ((old_rflags ^ rflags) & X86_EFLAGS_VM)
		svm_update_cpl(vcpu);
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
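
	/*
	 * Illustrative decode: attrib 0x0a9b unpacks to type 0xb
	 * (execute/read, accessed), s=1, dpl=0, present=1, avl=0, l=1,
	 * db=0 and g=1, i.e. a 64-bit code segment.
	 */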

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross-vendor migration purposes: treat a segment that is not
	 * present or has a null type as unusable.
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache: even if it is cleared in the descriptor
		 * itself, the cached bit remains 1. Since Intel has a check
		 * on this, set it here to support cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	if (!svm->vcpu.fpu_active)
		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
	else
		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
			| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}
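
/*
 * Note on the intercept decision above: CR0 reads and writes must trap
 * whenever the guest-visible CR0 differs from the value in the VMCB (e.g.
 * TS forced set while the FPU is lazily switched), so the guest always
 * reads the value it expects; when both views agree the intercepts are
 * dropped for speed.
 */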

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	if (!vcpu->fpu_active)
		cr0 |= X86_CR0_TS;
	/*
	 * Re-enable caching here because the QEMU BIOS does not do it;
	 * leaving CD/NW set would cause a noticeable delay at reboot.
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm_update_cpl(vcpu);

	mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, DB_VECTOR);
	clr_exception_intercept(svm, BP_VECTOR);

	if (svm->nmi_singlestep)
		set_exception_intercept(svm, DB_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			set_exception_intercept(svm, DB_VECTOR);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}
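
/*
 * ASID rollover illustrated: once next_asid exceeds max_asid, the per-CPU
 * generation counter is bumped, a flush-all is requested and allocation
 * restarts at ASID 1 (ASID 0 is used by the host itself).
 */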

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = svm->vmcb->control.exit_info_2;
	u32 error_code;
	int r = 1;

	switch (svm->apf_reason) {
	default:
		error_code = svm->vmcb->control.exit_info_1;

		trace_kvm_page_fault(fault_address, error_code);
		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			svm->vmcb->control.insn_bytes,
			svm->vmcb->control.insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}

static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
	    !svm->nmi_singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
		update_db_bp_intercept(&svm->vcpu);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
	int er;

	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, NM_VECTOR);

	svm->vcpu.fpu_active = 1;
	update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
	svm_fpu_activate(&svm->vcpu);
	return 1;
}

static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low = lower_32_bits(value);
		high = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}
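
/*
 * The magic constant compared above is the MCi_STATUS signature AMD
 * documents for erratum 383 (with the sticky overflow bit 62 masked out);
 * any other value indicates a genuine machine check and must not be
 * swallowed by this workaround.
 */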

static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return;
}

static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	svm->next_rip = svm->vmcb->control.exit_info_2;
	skip_emulated_instruction(&svm->vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}
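
/*
 * Sketch of the exit_info_1 decode above: bit 0 is the direction (1 = IN),
 * bit 2 the string flag, bits 6:4 the operand size in bytes and bits 31:16
 * the port number. An 8-bit "out %al, $0x80", for instance, shows up as
 * port 0x0080 with a size field of 1 and the IN/string bits clear (bits
 * 9:7 additionally encode the 16/32/64-bit address size).
 */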

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_read_guest_page(vcpu->kvm, gpa_to_gfn(cr3), &pdpte,
				  offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}
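
/*
 * The PDPTEs of the L1-provided nested page table are read straight from
 * guest memory above: gpa_to_gfn()/offset_in_page() split nested_cr3 into
 * a frame and an offset, each entry is 8 bytes wide, and a failed read is
 * reported back as a zero (i.e. not-present) entry.
 */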

static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
				   unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);
	svm_flush_tlb(vcpu);
}

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = fault->error_code;
	svm->vmcb->control.exit_info_2 = fault->address;

	nested_svm_vmexit(svm);
}

static int nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	int r;

	r = kvm_init_shadow_mmu(vcpu, &vcpu->arch.mmu);

	vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu.shadow_root_level = get_npt_level();
	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;

	return r;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;
	svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	vmexit = nested_svm_intercept(svm);
	if (vmexit == NESTED_EXIT_DONE)
		svm->nested.exit_required = true;

	return vmexit;
}

/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
		return false;

	/*
	 * If a #vmexit was already requested (by an intercepted exception,
	 * for instance) do not overwrite it with an "external interrupt"
	 * vmexit.
	 */
	if (svm->nested.exit_required)
		return false;

	svm->vmcb->control.exit_code = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	if (svm->nested.intercept & 1ULL) {
		/*
		 * The #vmexit can't be emulated here directly because this
		 * code path runs with irqs and preemption disabled. A
		 * #vmexit emulation might sleep. Only signal request for
		 * the #vmexit here.
		 */
		svm->nested.exit_required = true;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		return false;
	}

	return true;
}

/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
		return true;

	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->nested.exit_required = true;

	return false;
}

static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
	struct page *page;

	might_sleep();

	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto error;

	*_page = page;

	return kmap(page);

error:
	kvm_inject_gp(&svm->vcpu, 0);

	return NULL;
}

static void nested_svm_unmap(struct page *page)
{
	kunmap(page);
	kvm_release_page_dirty(page);
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port;
	u8 val, bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	gpa = svm->nested.vmcb_iopm + (port / 8);
	bit = port % 8;
	val = 0;

	/* If the IOPM can't be read, err on the side of reflecting the exit */
	if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
		return NESTED_EXIT_DONE;

	return (val & (1 << bit)) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
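
/*
 * Worked example for the IOPM lookup above: port 0x3f8 lands in byte
 * (0x3f8 / 8) == 0x7f of L1's IO permission map, bit (0x3f8 % 8) == 0
 * within it; the #vmexit is reflected to L1 only when that bit is set.
 */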

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write = svm->vmcb->control.exit_info_1 & 1;
	mask = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32-bit units, but we need it in bytes */
	offset *= 4;

	if (kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
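
/*
 * Worked example for the mask arithmetic above: every MSR occupies two
 * adjacent bits (read, then write) and 16 MSRs share one u32, so a write
 * to an MSR with (msr & 0xf) == 2 tests bit (2 * 2) + 1 == 5 of the u32
 * that svm_msrpm_offset() locates for it.
 */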

static int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
		nm_interception(svm);
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns NESTED_EXIT_DONE, this #vmexit was already
 * handled on behalf of the nested hypervisor.
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits)
			vmexit = NESTED_EXIT_DONE;
		/* async page fault always causes a vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->apf_reason != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->lbr_ctl              = from->lbr_ctl;
}

static int nested_svm_vmexit(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;

	trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
				       vmcb->control.exit_info_1,
				       vmcb->control.exit_info_2,
				       vmcb->control.exit_int_info,
				       vmcb->control.exit_int_info_err,
				       KVM_ISA_SVM);

	nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
	if (!nested_vmcb)
		return 1;

	/* Exit Guest-Mode */
	leave_guest_mode(&svm->vcpu);
	svm->nested.vmcb = 0;

	/* Give the current vmcb to the guest */
	disable_gif(svm);

	nested_vmcb->save.es = vmcb->save.es;
	nested_vmcb->save.cs = vmcb->save.cs;
	nested_vmcb->save.ss = vmcb->save.ss;
	nested_vmcb->save.ds = vmcb->save.ds;
	nested_vmcb->save.gdtr = vmcb->save.gdtr;
	nested_vmcb->save.idtr = vmcb->save.idtr;
	nested_vmcb->save.efer = svm->vcpu.arch.efer;
	nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
	nested_vmcb->save.cr2 = vmcb->save.cr2;
	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
	nested_vmcb->save.rip = vmcb->save.rip;
	nested_vmcb->save.rsp = vmcb->save.rsp;
	nested_vmcb->save.rax = vmcb->save.rax;
	nested_vmcb->save.dr7 = vmcb->save.dr7;
	nested_vmcb->save.dr6 = vmcb->save.dr6;
	nested_vmcb->save.cpl = vmcb->save.cpl;

	nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
	nested_vmcb->control.int_vector = vmcb->control.int_vector;
	nested_vmcb->control.int_state = vmcb->control.int_state;
	nested_vmcb->control.exit_code = vmcb->control.exit_code;
	nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
	nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
	nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
	nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
	nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
	nested_vmcb->control.next_rip = vmcb->control.next_rip;

	/*
	 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
	 * to make sure that we do not lose injected events. So check event_inj
	 * here and copy it to exit_int_info if it is valid.
	 * exit_int_info and event_inj can't both be valid because the case
	 * below only happens on a VMRUN instruction intercept which has
	 * no valid exit_int_info set.
	 */
	if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
		struct vmcb_control_area *nc = &nested_vmcb->control;

		nc->exit_int_info = vmcb->control.event_inj;
		nc->exit_int_info_err = vmcb->control.event_inj_err;
	}

	nested_vmcb->control.tlb_ctl = 0;
	nested_vmcb->control.event_inj = 0;
	nested_vmcb->control.event_inj_err = 0;

	/* We always set V_INTR_MASKING and remember the old value in hflags */
	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;

	/* Restore the original control entries */
	copy_vmcb_control_area(vmcb, hsave);

	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	svm->nested.nested_cr3 = 0;

	/* Restore selected save entries */
	svm->vmcb->save.es = hsave->save.es;
	svm->vmcb->save.cs = hsave->save.cs;
	svm->vmcb->save.ss = hsave->save.ss;
	svm->vmcb->save.ds = hsave->save.ds;
	svm->vmcb->save.gdtr = hsave->save.gdtr;
	svm->vmcb->save.idtr = hsave->save.idtr;
	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
	svm_set_efer(&svm->vcpu, hsave->save.efer);
	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = hsave->save.cr3;
		svm->vcpu.arch.cr3 = hsave->save.cr3;
	} else {
		(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
	}
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
	svm->vmcb->save.dr7 = 0;
	svm->vmcb->save.cpl = 0;
	svm->vmcb->control.exit_int_info = 0;

	mark_all_dirty(svm->vmcb);

	nested_svm_unmap(page);

	nested_svm_uninit_mmu_context(&svm->vcpu);
	kvm_mmu_reset_context(&svm->vcpu);
	kvm_mmu_load(&svm->vcpu);

	return 0;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
	/*
	 * This function merges the msr permission bitmaps of kvm and the
	 * nested vmcb. It is optimized in that it only merges the parts
	 * where the kvm msr permission bitmap may contain zero bits.
	 */
	int i;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
		u32 value, p;
		u64 offset;

		if (msrpm_offsets[i] == 0xffffffff)
			break;

		p      = msrpm_offsets[i];
		offset = svm->nested.vmcb_msrpm + (p * 4);

		if (kvm_read_guest(svm->vcpu.kvm, offset, &value, 4))
			return false;

		svm->nested.msrpm[p] = svm->msrpm[p] | value;
	}

	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);

	return true;
}
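
/*
 * Note the OR in the merge above: a set bit means "intercept", so the merged
 * bitmap traps an MSR access whenever either KVM or the L1 hypervisor wants
 * it trapped; only MSRs that both are willing to pass through stay direct.
 */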

static bool nested_vmcb_checks(struct vmcb *vmcb)
{
	if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
		return false;

	if (vmcb->control.asid == 0)
		return false;

	if (vmcb->control.nested_ctl && !npt_enabled)
		return false;

	return true;
}

static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct vmcb *hsave = svm->nested.hsave;
	struct vmcb *vmcb = svm->vmcb;
	struct page *page;
	u64 vmcb_gpa;

	vmcb_gpa = svm->vmcb->save.rax;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return false;

	if (!nested_vmcb_checks(nested_vmcb)) {
		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
		nested_vmcb->control.exit_code_hi = 0;
		nested_vmcb->control.exit_info_1 = 0;
		nested_vmcb->control.exit_info_2 = 0;

		nested_svm_unmap(page);

		return false;
	}

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
			       nested_vmcb->save.rip,
			       nested_vmcb->control.int_ctl,
			       nested_vmcb->control.event_inj,
			       nested_vmcb->control.nested_ctl);

	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
				    nested_vmcb->control.intercept_cr >> 16,
				    nested_vmcb->control.intercept_exceptions,
				    nested_vmcb->control.intercept);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	/*
	 * Save the old vmcb, so we don't need to pick what we save, but can
	 * restore everything when a VMEXIT occurs
	 */
	hsave->save.es = vmcb->save.es;
	hsave->save.cs = vmcb->save.cs;
	hsave->save.ss = vmcb->save.ss;
	hsave->save.ds = vmcb->save.ds;
	hsave->save.gdtr = vmcb->save.gdtr;
	hsave->save.idtr = vmcb->save.idtr;
	hsave->save.efer = svm->vcpu.arch.efer;
	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
	hsave->save.cr4 = svm->vcpu.arch.cr4;
	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
	hsave->save.rip = kvm_rip_read(&svm->vcpu);
	hsave->save.rsp = vmcb->save.rsp;
	hsave->save.rax = vmcb->save.rax;
	if (npt_enabled)
		hsave->save.cr3 = vmcb->save.cr3;
	else
		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);

	copy_vmcb_control_area(hsave, vmcb);

	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
		svm->vcpu.arch.hflags |= HF_HIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;

	if (nested_vmcb->control.nested_ctl) {
		kvm_mmu_unload(&svm->vcpu);
		svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
		nested_svm_init_mmu_context(&svm->vcpu);
	}

	/* Load the nested guest state */
	svm->vmcb->save.es = nested_vmcb->save.es;
	svm->vmcb->save.cs = nested_vmcb->save.cs;
	svm->vmcb->save.ss = nested_vmcb->save.ss;
	svm->vmcb->save.ds = nested_vmcb->save.ds;
	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
	if (npt_enabled) {
		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
	} else
		(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);

	/* Guest paging mode is active - reset mmu */
	kvm_mmu_reset_context(&svm->vcpu);

	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);

	/* In case we don't even reach vcpu_run, the fields are not updated */
	svm->vmcb->save.rax = nested_vmcb->save.rax;
	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
	svm->vmcb->save.rip = nested_vmcb->save.rip;
	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
	svm->vmcb->save.cpl = nested_vmcb->save.cpl;

	svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
	svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;

	/* cache intercepts */
	svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
	svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
	svm->nested.intercept = nested_vmcb->control.intercept;

	svm_flush_tlb(&svm->vcpu);
	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;

	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
		/* We only want the cr8 intercept bits of the guest */
		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	clr_intercept(svm, INTERCEPT_VMMCALL);

	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;

	nested_svm_unmap(page);

	/* Enter Guest-Mode */
	enter_guest_mode(&svm->vcpu);

	/*
	 * Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
	 */
	recalc_intercepts(svm);

	svm->nested.vmcb = vmcb_gpa;

	enable_gif(svm);

	mark_all_dirty(svm->vmcb);

	return true;
}
2597
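/*
 * Copy the state touched by VMLOAD/VMSAVE (FS, GS, TR, LDTR,
 * KernelGSBase and the SYSCALL/SYSENTER MSR state) from one VMCB to
 * another. The two callers below pick the direction: vmload copies
 * from the guest's VMCB into ours, vmsave the reverse.
 */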
static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
	to_vmcb->save.fs = from_vmcb->save.fs;
	to_vmcb->save.gs = from_vmcb->save.gs;
	to_vmcb->save.tr = from_vmcb->save.tr;
	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
	to_vmcb->save.star = from_vmcb->save.star;
	to_vmcb->save.lstar = from_vmcb->save.lstar;
	to_vmcb->save.cstar = from_vmcb->save.cstar;
	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

static int vmload_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
	nested_svm_unmap(page);

	return 1;
}

static int vmsave_interception(struct vcpu_svm *svm)
{
	struct vmcb *nested_vmcb;
	struct page *page;

	if (nested_svm_check_permissions(svm))
		return 1;

	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
	if (!nested_vmcb)
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
	nested_svm_unmap(page);

	return 1;
}

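/*
 * Emulate VMRUN for the L1 guest. nested_svm_vmrun() switches to the
 * nested VMCB; if merging the MSR permission bitmap afterwards fails,
 * a synthetic SVM_EXIT_ERR #VMEXIT is injected so L1 sees a failed
 * VMRUN instead of a wedged vcpu.
 */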
static int vmrun_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	/* Save rip after vmrun instruction */
	kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);

	if (!nested_svm_vmrun(svm))
		return 1;

	if (!nested_svm_vmrun_msrpm(svm))
		goto failed;

	return 1;

failed:

	svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1  = 0;
	svm->vmcb->control.exit_info_2  = 0;

	nested_svm_vmexit(svm);

	return 1;
}

static int stgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	enable_gif(svm);

	return 1;
}

static int clgi_interception(struct vcpu_svm *svm)
{
	if (nested_svm_check_permissions(svm))
		return 1;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);

	disable_gif(svm);

	/* After a CLGI no interrupts should be delivered */
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;

	mark_dirty(svm->vmcb, VMCB_INTR);

	return 1;
}

static int invlpga_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX],
			  vcpu->arch.regs[VCPU_REGS_RAX]);

	/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
	kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int skinit_interception(struct vcpu_svm *svm)
{
	trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]);

	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int xsetbv_interception(struct vcpu_svm *svm)
{
	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
		skip_emulated_instruction(&svm->vcpu);
	}

	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

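/*
 * Handle an intercepted task switch. The reason (IRET, far jump,
 * interrupt/exception gate or call) is reconstructed from exit_info_2
 * and exit_int_info, pending event state is cleaned up for
 * gate-initiated switches, and the switch itself is emulated by the
 * common kvm_task_switch() code.
 */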
static int task_switch_interception(struct vcpu_svm *svm)
{
	u16 tss_selector;
	int reason;
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
	uint32_t type =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
	uint32_t idt_v =
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;
	u32 error_code = 0;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;
	else if (idt_v)
		reason = TASK_SWITCH_GATE;
	else
		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		switch (type) {
		case SVM_EXITINTINFO_TYPE_NMI:
			svm->vcpu.arch.nmi_injected = false;
			break;
		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
				error_code =
					(u32)svm->vmcb->control.exit_info_2;
			}
			kvm_clear_exception_queue(&svm->vcpu);
			break;
		case SVM_EXITINTINFO_TYPE_INTR:
			kvm_clear_interrupt_queue(&svm->vcpu);
			break;
		default:
			break;
		}
	}

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
		skip_emulated_instruction(&svm->vcpu);

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
		int_vec = -1;

	if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
				has_error_code, error_code) == EMULATE_FAIL) {
		svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		svm->vcpu.run->internal.ndata = 0;
		return 0;
	}
	return 1;
}

static int cpuid_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int iret_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.nmi_window_exits;
	clr_intercept(svm, INTERCEPT_IRET);
	svm->vcpu.arch.hflags |= HF_IRET_MASK;
	svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm)
{
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;

	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
	skip_emulated_instruction(&svm->vcpu);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm)
{
	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

static int rdpmc_interception(struct vcpu_svm *svm)
{
	int err;

	if (!static_cpu_has(X86_FEATURE_NRIPS))
		return emulate_on_interception(svm);

	err = kvm_rdpmc(&svm->vcpu);
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

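/*
 * The selective CR0 write intercept fires only when a CR0 write
 * changes bits other than TS or MP (SVM_CR0_SELECTIVE_MASK). If L1
 * asked for that intercept and the pending write qualifies, reflect
 * an SVM_EXIT_CR0_SEL_WRITE #VMEXIT into the nested hypervisor.
 */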
bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
	val &= ~SVM_CR0_SELECTIVE_MASK;

	if (cr0 ^ val) {
		svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
		ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
	}

	return ret;
}

#define CR_VALID (1ULL << 63)

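/*
 * Common handler for intercepted CR accesses. It relies on the
 * DecodeAssist feature: exit_info_1 carries the GPR operand (with
 * CR_VALID signalling that the decode fields are usable), so no
 * instruction emulation is needed on capable hardware; otherwise we
 * fall back to emulate_on_interception().
 */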
static int cr_interception(struct vcpu_svm *svm)
{
	int reg, cr;
	unsigned long val;
	int err;

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
		val = kvm_register_read(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
				err = kvm_set_cr0(&svm->vcpu, val);
			else
				return 1;

			break;
		case 3:
			err = kvm_set_cr3(&svm->vcpu, val);
			break;
		case 4:
			err = kvm_set_cr4(&svm->vcpu, val);
			break;
		case 8:
			err = kvm_set_cr8(&svm->vcpu, val);
			break;
		default:
			WARN(1, "unhandled write to CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
	} else { /* mov from cr */
		switch (cr) {
		case 0:
			val = kvm_read_cr0(&svm->vcpu);
			break;
		case 2:
			val = svm->vcpu.arch.cr2;
			break;
		case 3:
			val = kvm_read_cr3(&svm->vcpu);
			break;
		case 4:
			val = kvm_read_cr4(&svm->vcpu);
			break;
		case 8:
			val = kvm_get_cr8(&svm->vcpu);
			break;
		default:
			WARN(1, "unhandled read from CR%d", cr);
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
		kvm_register_write(&svm->vcpu, reg, val);
	}
	kvm_complete_insn_gp(&svm->vcpu, err);

	return 1;
}

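/*
 * Same idea as cr_interception: with decode assists exit_info_1 names
 * the GPR operand, and exit codes at offset 16 and above from
 * SVM_EXIT_READ_DR0 encode the mov-to-DRn direction, so the access can
 * be handled without running the instruction emulator.
 */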
static int dr_interception(struct vcpu_svm *svm)
{
	int reg, dr;
	unsigned long val;
	int err;

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
		return emulate_on_interception(svm);

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	if (dr >= 16) { /* mov to DRn */
		val = kvm_register_read(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		err = kvm_get_dr(&svm->vcpu, dr, &val);
		if (!err)
			kvm_register_write(&svm->vcpu, reg, val);
	}

	skip_emulated_instruction(&svm->vcpu);

	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;
	int r;

	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
	/* instruction emulation calls kvm_set_cr8() */
	r = cr_interception(svm);
	if (irqchip_in_kernel(svm->vcpu.kvm)) {
		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
		return r;
	}
	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
		return r;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

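/*
 * Compute the TSC value L1 sees for a given host TSC: scale the host
 * value and add L1's tsc_offset. get_host_vmcb() is what keeps the L1
 * offset in use even while a nested guest is running.
 */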
u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
	return vmcb->control.tsc_offset +
		svm_scale_tsc(vcpu, host_tsc);
}

static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	switch (ecx) {
	case MSR_IA32_TSC: {
		*data = svm->vmcb->control.tsc_offset +
			svm_scale_tsc(vcpu, native_read_tsc());

		break;
	}
	case MSR_STAR:
		*data = svm->vmcb->save.star;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		*data = svm->vmcb->save.lstar;
		break;
	case MSR_CSTAR:
		*data = svm->vmcb->save.cstar;
		break;
	case MSR_KERNEL_GS_BASE:
		*data = svm->vmcb->save.kernel_gs_base;
		break;
	case MSR_SYSCALL_MASK:
		*data = svm->vmcb->save.sfmask;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		*data = svm->vmcb->save.sysenter_cs;
		break;
	case MSR_IA32_SYSENTER_EIP:
		*data = svm->sysenter_eip;
		break;
	case MSR_IA32_SYSENTER_ESP:
		*data = svm->sysenter_esp;
		break;
	/*
	 * Nobody will change the following 5 values in the VMCB, so we can
	 * safely return them on rdmsr. They will always be 0 until LBRV is
	 * implemented.
	 */
	case MSR_IA32_DEBUGCTLMSR:
		*data = svm->vmcb->save.dbgctl;
		break;
	case MSR_IA32_LASTBRANCHFROMIP:
		*data = svm->vmcb->save.br_from;
		break;
	case MSR_IA32_LASTBRANCHTOIP:
		*data = svm->vmcb->save.br_to;
		break;
	case MSR_IA32_LASTINTFROMIP:
		*data = svm->vmcb->save.last_excp_from;
		break;
	case MSR_IA32_LASTINTTOIP:
		*data = svm->vmcb->save.last_excp_to;
		break;
	case MSR_VM_HSAVE_PA:
		*data = svm->nested.hsave_msr;
		break;
	case MSR_VM_CR:
		*data = svm->nested.vm_cr_msr;
		break;
	case MSR_IA32_UCODE_REV:
		*data = 0x01000065;
		break;
	default:
		return kvm_get_msr_common(vcpu, ecx, data);
	}
	return 0;
}

static int rdmsr_interception(struct vcpu_svm *svm)
{
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_read(ecx, data);

		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
		svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

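/*
 * Emulate writes to MSR_VM_CR. Only bits in SVM_VM_CR_VALID_MASK may
 * change; once SVM_DIS is set, the LOCK and DIS bits become read-only,
 * and setting SVM_DIS is refused (return 1 -> #GP for the guest) while
 * EFER.SVME is still enabled.
 */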
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)
		return 1;

	chg_mask = SVM_VM_CR_VALID_MASK;

	if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	svm->nested.vm_cr_msr &= ~chg_mask;
	svm->nested.vm_cr_msr |= (data & chg_mask);

	svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

	/* check for svm_disable while efer.svme is set */
	if (svm_dis && (vcpu->arch.efer & EFER_SVME))
		return 1;

	return 0;
}

static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	u32 ecx = msr->index;
	u64 data = msr->data;
	switch (ecx) {
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, msr);
		break;
	case MSR_STAR:
		svm->vmcb->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->sysenter_eip = data;
		svm->vmcb->save.sysenter_eip = data;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->sysenter_esp = data;
		svm->vmcb->save.sysenter_esp = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		mark_dirty(svm->vmcb, VMCB_LBR);
		if (data & (1ULL<<0))
			svm_enable_lbrv(svm);
		else
			svm_disable_lbrv(svm);
		break;
	case MSR_VM_HSAVE_PA:
		svm->nested.hsave_msr = data;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
	struct msr_data msr;
	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;

	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	if (svm_set_msr(&svm->vcpu, &msr)) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(&svm->vcpu, 0);
	} else {
		trace_kvm_msr_write(ecx, data);
		skip_emulated_instruction(&svm->vcpu);
	}
	return 1;
}

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}

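/*
 * A VINTR exit means the virtual interrupt window opened: the guest
 * can take an interrupt now. Drop the artificial V_IRQ that was used
 * to request the exit and, with a userspace irqchip, give userspace a
 * chance to inject.
 */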
static int interrupt_window_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	/*
	 * If user space is waiting to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(svm->vcpu.kvm) &&
	    kvm_run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(&svm->vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}

	return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
	kvm_vcpu_on_spin(&(svm->vcpu));
	return 1;
}

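/*
 * Dispatch table for intercepted events, indexed by the SVM exit code.
 * handle_exit() bounds-checks the code and reports unhandled exits to
 * userspace as KVM_EXIT_UNKNOWN.
 */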
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
	[SVM_EXIT_READ_CR0]			= cr_interception,
	[SVM_EXIT_READ_CR3]			= cr_interception,
	[SVM_EXIT_READ_CR4]			= cr_interception,
	[SVM_EXIT_READ_CR8]			= cr_interception,
	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
	[SVM_EXIT_WRITE_CR0]			= cr_interception,
	[SVM_EXIT_WRITE_CR3]			= cr_interception,
	[SVM_EXIT_WRITE_CR4]			= cr_interception,
	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
	[SVM_EXIT_READ_DR0]			= dr_interception,
	[SVM_EXIT_READ_DR1]			= dr_interception,
	[SVM_EXIT_READ_DR2]			= dr_interception,
	[SVM_EXIT_READ_DR3]			= dr_interception,
	[SVM_EXIT_READ_DR4]			= dr_interception,
	[SVM_EXIT_READ_DR5]			= dr_interception,
	[SVM_EXIT_READ_DR6]			= dr_interception,
	[SVM_EXIT_READ_DR7]			= dr_interception,
	[SVM_EXIT_WRITE_DR0]			= dr_interception,
	[SVM_EXIT_WRITE_DR1]			= dr_interception,
	[SVM_EXIT_WRITE_DR2]			= dr_interception,
	[SVM_EXIT_WRITE_DR3]			= dr_interception,
	[SVM_EXIT_WRITE_DR4]			= dr_interception,
	[SVM_EXIT_WRITE_DR5]			= dr_interception,
	[SVM_EXIT_WRITE_DR6]			= dr_interception,
	[SVM_EXIT_WRITE_DR7]			= dr_interception,
	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
	[SVM_EXIT_INTR]				= intr_interception,
	[SVM_EXIT_NMI]				= nmi_interception,
	[SVM_EXIT_SMI]				= nop_on_interception,
	[SVM_EXIT_INIT]				= nop_on_interception,
	[SVM_EXIT_VINTR]			= interrupt_window_interception,
	[SVM_EXIT_RDPMC]			= rdpmc_interception,
	[SVM_EXIT_CPUID]			= cpuid_interception,
	[SVM_EXIT_IRET]				= iret_interception,
	[SVM_EXIT_INVD]				= emulate_on_interception,
	[SVM_EXIT_PAUSE]			= pause_interception,
	[SVM_EXIT_HLT]				= halt_interception,
	[SVM_EXIT_INVLPG]			= invlpg_interception,
	[SVM_EXIT_INVLPGA]			= invlpga_interception,
	[SVM_EXIT_IOIO]				= io_interception,
	[SVM_EXIT_MSR]				= msr_interception,
	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
	[SVM_EXIT_VMRUN]			= vmrun_interception,
	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
	[SVM_EXIT_VMLOAD]			= vmload_interception,
	[SVM_EXIT_VMSAVE]			= vmsave_interception,
	[SVM_EXIT_STGI]				= stgi_interception,
	[SVM_EXIT_CLGI]				= clgi_interception,
	[SVM_EXIT_SKINIT]			= skinit_interception,
	[SVM_EXIT_WBINVD]			= emulate_on_interception,
	[SVM_EXIT_MONITOR]			= invalid_op_interception,
	[SVM_EXIT_MWAIT]			= invalid_op_interception,
	[SVM_EXIT_XSETBV]			= xsetbv_interception,
	[SVM_EXIT_NPF]				= pf_interception,
};

static void dump_vmcb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save->fs.selector, save->fs.attrib,
	       save->fs.limit, save->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save->gs.selector, save->gs.attrib,
	       save->gs.limit, save->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save->ldtr.selector, save->ldtr.attrib,
	       save->ldtr.limit, save->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save->tr.selector, save->tr.attrib,
	       save->tr.limit, save->tr.base);
	pr_err("cpl: %d efer: %016llx\n",
	       save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save->star, "lstar:", save->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save->cstar, "sfmask:", save->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save->kernel_gs_base,
	       "sysenter_cs:", save->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save->sysenter_esp,
	       "sysenter_eip:", save->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
}

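/*
 * Top-level #VMEXIT handler. Exits that happened while a nested guest
 * was running are either reflected into the L1 hypervisor
 * (nested_svm_exit_handled) or processed here on its behalf;
 * everything else is routed through svm_exit_handlers[].
 */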
static int handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
		vcpu->arch.cr0 = svm->vmcb->save.cr0;
	if (npt_enabled)
		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (unlikely(svm->nested.exit_required)) {
		nested_svm_vmexit(svm);
		svm->nested.exit_required = false;

		return 1;
	}

	if (is_guest_mode(vcpu)) {
		int vmexit;

		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
					svm->vmcb->control.exit_info_1,
					svm->vmcb->control.exit_info_2,
					svm->vmcb->control.exit_int_info,
					svm->vmcb->control.exit_int_info_err,
					KVM_ISA_SVM);

		vmexit = nested_svm_exit_special(svm);

		if (vmexit == NESTED_EXIT_CONTINUE)
			vmexit = nested_svm_exit_handled(svm);

		if (vmexit == NESTED_EXIT_DONE)
			return 1;
	}

	svm_complete_interrupts(svm);

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
		dump_vmcb(vcpu);
		return 0;
	}

	if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
	    exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
	    exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
	    exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
		       "exit_code 0x%x\n",
		       __func__, svm->vmcb->control.exit_int_info,
		       exit_code);

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_code;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	sd->tss_desc->type = 9; /* available 32/64-bit TSS */
	load_TR_desc();
}

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();

	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
	struct vmcb_control_area *control;

	control = &svm->vmcb->control;
	control->int_vector = irq;
	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
	mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (irr == -1)
		return;

	if (tpr >= irr)
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	return;
}

static int svm_vm_has_apicv(struct kvm *kvm)
{
	return 0;
}

static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	return;
}

static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
{
	return;
}

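/*
 * An NMI can be injected when no earlier NMI is still masked (i.e.
 * awaiting its IRET), the vcpu is not in an interrupt shadow, GIF is
 * set, and a nested hypervisor does not intercept the NMI itself.
 */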
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;
	ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
	      !(svm->vcpu.arch.hflags & HF_NMI_MASK);
	ret = ret && gif_set(svm) && nested_svm_nmi(svm);

	return ret;
}

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}

static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	int ret;

	if (!gif_set(svm) ||
	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
		return 0;

	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

	if (is_guest_mode(vcpu))
		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);

	return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
	 * 1, because that's a separate STGI/VMRUN intercept. The next time we
	 * get that intercept, this function will be called again though and
	 * we'll get the vintr intercept.
	 */
	if (gif_set(svm) && nested_svm_intr(svm)) {
		svm_set_vintr(svm);
		svm_inject_irq(svm, 0x0);
	}
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* IRET will cause a vm exit */

	/*
	 * Something prevents NMI from being injected. Single step over the
	 * possible problem (IRET or exception injection or interrupt shadow)
	 */
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
	update_db_bp_intercept(vcpu);
}

static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	return 0;
}

static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
}

static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}

static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}

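/*
 * Recover event-injection state after a #VMEXIT: events whose delivery
 * was cut short (reported in exit_int_info) are re-queued so they get
 * reinjected on the next entry, with special handling for soft
 * exceptions such as INT3, which are re-executed rather than
 * re-injected.
 */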
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
	u8 vector;
	int type;
	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int3_injected = svm->int3_injected;

	svm->int3_injected = 0;

	/*
	 * If we've made progress since setting HF_IRET_MASK, we've
	 * executed an IRET and can allow NMI injection.
	 */
	if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
	    && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	}

	svm->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&svm->vcpu);
	kvm_clear_interrupt_queue(&svm->vcpu);

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/*
		 * In case of software exceptions, do not reinject the vector,
		 * but re-execute the instruction instead. Rewind RIP first
		 * if we emulated INT3 before.
		 */
		if (kvm_exception_is_soft(vector)) {
			if (vector == BP_VECTOR && int3_injected &&
			    kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
				kvm_rip_write(&svm->vcpu,
					      kvm_rip_read(&svm->vcpu) -
					      int3_injected);
			break;
		}
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);

		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}
}

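/*
 * A VMRUN was aborted before the guest could run (for example because a
 * nested vmexit is pending), so move the event programmed into event_inj
 * back into exit_int_info and let svm_complete_interrupts() requeue it.
 */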
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;

	control->exit_int_info = control->event_inj;
	control->exit_int_info_err = control->event_inj_err;
	control->event_inj = 0;
	svm_complete_interrupts(svm);
}

static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	/*
	 * A vmexit emulation is required before the vcpu can be executed
	 * again.
	 */
	if (unlikely(svm->nested.exit_required))
		return;

	pre_svm_run(svm);

	sync_lapic_to_cr8(vcpu);

	svm->vmcb->save.cr2 = vcpu->arch.cr2;

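	/*
	 * CLGI clears GIF, so neither interrupts nor NMIs can hit the
	 * host between here and the STGI after the #VMEXIT; RFLAGS.IF is
	 * set again so that anything pending is taken as soon as STGI
	 * re-enables GIF.
	 */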
	clgi();

	local_irq_enable();

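	/*
	 * The VMLOAD/VMRUN/VMSAVE sequence below saves and restores the
	 * general purpose registers by hand; RAX, RSP and RIP travel in
	 * the VMCB save area and were written above.
	 */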
	asm volatile (
		"push %%" _ASM_BP "; \n\t"
		"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
		"mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
		"mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
		"mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
		"mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
		"mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%[svm]),  %%r8  \n\t"
		"mov %c[r9](%[svm]),  %%r9  \n\t"
		"mov %c[r10](%[svm]), %%r10 \n\t"
		"mov %c[r11](%[svm]), %%r11 \n\t"
		"mov %c[r12](%[svm]), %%r12 \n\t"
		"mov %c[r13](%[svm]), %%r13 \n\t"
		"mov %c[r14](%[svm]), %%r14 \n\t"
		"mov %c[r15](%[svm]), %%r15 \n\t"
#endif

		/* Enter guest mode */
		"push %%" _ASM_AX " \n\t"
		"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
		__ex(SVM_VMLOAD) "\n\t"
		__ex(SVM_VMRUN) "\n\t"
		__ex(SVM_VMSAVE) "\n\t"
		"pop %%" _ASM_AX " \n\t"

		/* Save guest registers, load host registers */
		"mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
		"mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
		"mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
		"mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
		"mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
		"mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%[svm]) \n\t"
		"mov %%r9,  %c[r9](%[svm]) \n\t"
		"mov %%r10, %c[r10](%[svm]) \n\t"
		"mov %%r11, %c[r11](%[svm]) \n\t"
		"mov %%r12, %c[r12](%[svm]) \n\t"
		"mov %%r13, %c[r13](%[svm]) \n\t"
		"mov %%r14, %c[r14](%[svm]) \n\t"
		"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
		"pop %%" _ASM_BP
		:
		: [svm]"a"(svm),
		  [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
		  [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
		  [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
		  [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
		  [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
		  [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
		  [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
		, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
		  [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
		  [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
		  [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
		  [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
		  [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
		  [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
		  [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
		: "cc", "memory"
#ifdef CONFIG_X86_64
		, "rbx", "rcx", "rdx", "rsi", "rdi"
		, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#else
		, "ebx", "ecx", "edx", "esi", "edi"
#endif
		);

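	/*
	 * The #VMEXIT does not restore the full FS/GS state that VMLOAD
	 * replaced with guest values, so reload the host bases here; on
	 * x86_64 MSR_GS_BASE also carries the per-cpu segment.
	 */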
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
	loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif

	reload_tss(vcpu);

	local_irq_disable();

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_handle_nmi(&svm->vcpu);

	stgi();

	/* Any pending NMI will happen here */

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_after_handle_nmi(&svm->vcpu);

	sync_cr8_to_lapic(vcpu);

	svm->next_rip = 0;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	/* If the exit was due to a #PF, check for an async page fault. */
	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		svm->apf_reason = kvm_read_and_reset_pf_reason();

	if (npt_enabled) {
		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
	}

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance
	 * to change the physical cpu.
	 */
	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
		svm_handle_mce(svm);

	mark_all_clean(svm->vmcb);
}

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = root;
	mark_dirty(svm->vmcb, VMCB_CR);
	svm_flush_tlb(vcpu);
}

static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* Also sync guest cr3 here in case we live migrate */
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);

	svm_flush_tlb(vcpu);
}

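/* The BIOS can lock SVM off by setting SVMDIS in the VM_CR MSR. */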
static int is_disabled(void)
{
	u64 vm_cr;

	rdmsrl(MSR_VM_CR, vm_cr);
	if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
		return 1;

	return 0;
}

static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMMCALL instruction (opcode 0f 01 d9):
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;
}

static void svm_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static bool svm_cpu_has_accelerated_tpr(void)
{
	return false;
}

static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	return 0;
}

static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
}

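/*
 * Adjust the CPUID leaves reported to the guest: advertise the SVM bit
 * in 0x80000001 when nesting is enabled, and describe the nested SVM
 * features in leaf 0x8000000A.
 */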
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
	switch (func) {
	case 0x80000001:
		if (nested)
			entry->ecx |= (1 << 2); /* Set SVM bit */
		break;
	case 0x8000000A:
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		entry->edx = 0; /* Do not advertise any additional
				   features by default */

		/* Support next_rip if host supports it */
		if (boot_cpu_has(X86_FEATURE_NRIPS))
			entry->edx |= SVM_FEATURE_NRIP;

		/* Support NPT for the guest if enabled */
		if (npt_enabled)
			entry->edx |= SVM_FEATURE_NPT;

		break;
	}
}

static int svm_get_lpage_level(void)
{
	return PT_PDPE_LEVEL;
}

static bool svm_rdtscp_supported(void)
{
	return false;
}

static bool svm_invpcid_supported(void)
{
	return false;
}

static bool svm_has_wbinvd_exit(void)
{
	return true;
}

static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	set_exception_intercept(svm, NM_VECTOR);
	update_cr0_intercept(svm);
}

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }

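/*
 * Map the emulator's intercept codes to SVM exit codes, together with
 * the emulation stage at which the intercept must be checked.
 */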
static const struct __x86_intercept {
	u32 exit_code;
	enum x86_intercept_stage stage;
} x86_intercept_map[] = {
	[x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
	[x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
	[x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
	[x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
	[x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
	[x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
	[x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
	[x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
	[x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
	[x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
	[x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
	[x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
	[x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
	[x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
	[x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
	[x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
	[x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
	[x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
	[x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
	[x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
	[x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
	[x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
	[x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
	[x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
	[x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
	[x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
	[x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
	[x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
	[x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
	[x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
	[x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
	[x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
	[x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
	[x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
	[x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
	[x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
	[x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
	[x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
	[x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
	[x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
};

#undef PRE_EX
#undef POST_EX
#undef POST_MEM

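/*
 * Called by the x86 emulator while emulating instructions for a nested
 * guest: build the exit information the instruction would have produced
 * in hardware and ask nested_svm_exit_handled() whether L1 intercepts it.
 */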
static int svm_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int vmexit, ret = X86EMUL_CONTINUE;
	struct __x86_intercept icpt_info;
	struct vmcb *vmcb = svm->vmcb;

	if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
		goto out;

	icpt_info = x86_intercept_map[info->intercept];

	if (stage != icpt_info.stage)
		goto out;

	switch (icpt_info.exit_code) {
	case SVM_EXIT_READ_CR0:
		if (info->intercept == x86_intercept_cr_read)
			icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We only get this intercept for a NOP; PAUSE is REP NOP,
		 * so check for the REP prefix here.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		/* Don't fall through and build bogus I/O exit info. */
		break;
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

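		/*
		 * The port number comes from DX and lands in bits 31:16
		 * of the IOIO exit_info_1 word.
		 */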
		exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info |= SVM_IOIO_TYPE_MASK;
			bytes = info->src_bytes;
		} else {
			bytes = info->dst_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	vmcb->control.next_rip  = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

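/* Wire the SVM implementations into the generic KVM x86 code. */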
static struct kvm_x86_ops svm_x86_ops = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,

	.update_db_bp_intercept = update_db_bp_intercept,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.set_dr7 = svm_set_dr7,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,
	.fpu_activate = svm_fpu_activate,
	.fpu_deactivate = svm_fpu_deactivate,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
	.vm_has_apicv = svm_vm_has_apicv,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_isr_update = svm_hwapic_isr_update,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,
	.invpcid_supported = svm_invpcid_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.set_tsc_khz = svm_set_tsc_khz,
	.read_tsc_offset = svm_read_tsc_offset,
	.write_tsc_offset = svm_write_tsc_offset,
	.adjust_tsc_offset = svm_adjust_tsc_offset,
	.compute_tsc_offset = svm_compute_tsc_offset,
	.read_l1_tsc = svm_read_l1_tsc,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
};

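/*
 * kvm_init() registers svm_x86_ops with the KVM core and tells it the
 * size and alignment of each struct vcpu_svm allocation.
 */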
static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)