/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

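/*
 * SVM feature flags, following the bit positions reported in
 * CPUID Fn8000_000A_EDX (see the AMD APM vol. 2, "SVM Features").
 */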
#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD	0xffffff0000000000ULL
#define TSC_RATIO_MIN	0x0000000000000001ULL
#define TSC_RATIO_MAX	0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS	8
#define AVIC_VCPU_ID_MASK	((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS		24
#define AVIC_VM_ID_NR		(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK		((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)	(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
				 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)	((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)	(x & AVIC_VCPU_ID_MASK)
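
/*
 * For example, AVIC_GATAG(0x123456, 0x78) packs vm_id 0x123456 and
 * vcpu_id 0x78 into the tag 0x12345678, and AVIC_GATAG_TO_VMID()/
 * AVIC_GATAG_TO_VCPUID() recover the two fields from it.
 */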

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;

	unsigned int3_injected;
	unsigned long int3_rip;
	u32 apf_reason;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

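/*
 * MSR_AMD64_TSC_RATIO holds the guest/host TSC frequency ratio as an
 * 8.32 fixed-point value: the integer part lives in bits 39:32 and the
 * fraction in bits 31:0, so TSC_RATIO_DEFAULT below encodes a 1:1 ratio.
 */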
static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID	0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,			.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,	.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,			.always = true  },
	{ .index = MSR_FS_BASE,			.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,		.always = true  },
	{ .index = MSR_LSTAR,			.always = true  },
	{ .index = MSR_CSTAR,			.always = true  },
	{ .index = MSR_SYSCALL_MASK,		.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,	.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,	.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,	.always = false },
	{ .index = MSR_INVALID,			.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* AVIC VM ID bit masks and lock */
static DECLARE_BITMAP(avic_vm_id_bitmap, AVIC_VM_ID_NR);
static DEFINE_SPINLOCK(avic_vm_id_lock);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

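/*
 * The VMCB "clean" field tells the CPU which cached VMCB state it may
 * reuse on the next VMRUN.  After software modifies part of the VMCB it
 * must clear the corresponding bit so hardware reloads that state; e.g.
 * a write to control->int_ctl is followed by mark_dirty(vmcb, VMCB_INTR).
 */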
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

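/*
 * While the vcpu runs in guest mode, the active VMCB has to intercept
 * everything KVM itself relies on (the settings saved in nested.hsave)
 * as well as everything the nested hypervisor requested, so the cached
 * intercept vectors below are merged with a bitwise OR.
 */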
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

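/*
 * The Global Interrupt Flag (GIF) gates the delivery of interrupts and
 * exceptions around VMRUN and is toggled with the STGI/CLGI instructions;
 * these helpers track the guest's virtual GIF state in vcpu hflags.
 */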
static inline void enable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

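/*
 * The MSR permission bitmap uses two bits per MSR across three 2K
 * regions, one per MSR range above.  As a worked example, MSR_STAR
 * (0xc0000081) lies in the second range: its byte offset is
 * 2048 + 0x81 / 4 = 2080, so svm_msrpm_offset() returns the u32 offset
 * 2080 / 4 = 520.
 */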
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

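/*
 * On processors with the NextRIP (NRIPS) feature, the VMCB reports the
 * address of the instruction following the one that caused the exit;
 * without it, skipping an instruction falls back to the x86 emulator.
 */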
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * If we are within a nested VM, we'd better #VMEXIT and let the
	 * guest handle the exception.
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!sd->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

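/*
 * Each MSR occupies two adjacent bits in the permission map: the even
 * bit controls reads and the odd bit writes, and a set bit means the
 * access is intercepted.  Continuing the MSR_STAR example above, its
 * read/write bits are bits 2 and 3 of msrpm[520].
 */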
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers, extend the direct_access_msrs list at the
	 * beginning of the file.
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed.  Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

/* Note:
 * This hash table is used to map VM_ID to a struct kvm_arch,
 * when handling AMD IOMMU GALOG notification to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	if (!vcpu)
		return 0;

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu->mode == OUTSIDE_GUEST_MODE)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = page_to_phys(svm->avic_backing_page);
	phys_addr_t lpa = page_to_phys(vm_data->avic_logical_id_table_page);
	phys_addr_t ppa = page_to_phys(vm_data->avic_physical_id_table_page);

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	svm->vcpu.arch.apicv_active = true;
}

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_MONITOR);
	set_intercept(svm, INTERCEPT_MWAIT);
	set_intercept(svm, INTERCEPT_XSETBV);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	if (avic)
		avic_init_vmcb(svm);

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);

}

static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, int index)
{
	u64 *avic_physical_id_table;
	struct kvm_arch *vm_data = &vcpu->kvm->arch;

	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return NULL;

	avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);

	return &avic_physical_id_table[index];
}

/**
 * Note:
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry since it uses the address in the AVIC_BACKING_PAGE
 * pointer field of the VMCB. Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
1293static int avic_init_access_page(struct kvm_vcpu *vcpu)
1294{
1295 struct kvm *kvm = vcpu->kvm;
1296 int ret;
1297
1298 if (kvm->arch.apic_access_page_done)
1299 return 0;
1300
1301 ret = x86_set_memory_region(kvm,
1302 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1303 APIC_DEFAULT_PHYS_BASE,
1304 PAGE_SIZE);
1305 if (ret)
1306 return ret;
1307
1308 kvm->arch.apic_access_page_done = true;
1309 return 0;
1310}
1311
1312static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1313{
1314 int ret;
1315 u64 *entry, new_entry;
1316 int id = vcpu->vcpu_id;
1317 struct vcpu_svm *svm = to_svm(vcpu);
1318
1319 ret = avic_init_access_page(vcpu);
1320 if (ret)
1321 return ret;
1322
1323 if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1324 return -EINVAL;
1325
1326 if (!svm->vcpu.arch.apic->regs)
1327 return -EINVAL;
1328
1329 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1330
1331 /* Setting AVIC backing page address in the phy APIC ID table */
1332 entry = avic_get_physical_id_entry(vcpu, id);
1333 if (!entry)
1334 return -EINVAL;
1335
1336 new_entry = READ_ONCE(*entry);
1337 new_entry = (page_to_phys(svm->avic_backing_page) &
1338 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1339 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
1340 WRITE_ONCE(*entry, new_entry);
1341
1342 svm->avic_physical_id_cache = entry;
1343
1344 return 0;
1345}
1346
static inline int avic_get_next_vm_id(void)
{
	int id;

	spin_lock(&avic_vm_id_lock);

	/* AVIC VM ID is one-based. */
	id = find_next_zero_bit(avic_vm_id_bitmap, AVIC_VM_ID_NR, 1);
	if (id <= AVIC_VM_ID_MASK)
		__set_bit(id, avic_vm_id_bitmap);
	else
		id = -EAGAIN;

	spin_unlock(&avic_vm_id_lock);
	return id;
}

static inline int avic_free_vm_id(int id)
{
	if (id <= 0 || id > AVIC_VM_ID_MASK)
		return -EINVAL;

	spin_lock(&avic_vm_id_lock);
	__clear_bit(id, avic_vm_id_bitmap);
	spin_unlock(&avic_vm_id_lock);
	return 0;
}

static void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_arch *vm_data = &kvm->arch;

	avic_free_vm_id(vm_data->avic_vm_id);

	if (vm_data->avic_logical_id_table_page)
		__free_page(vm_data->avic_logical_id_table_page);
	if (vm_data->avic_physical_id_table_page)
		__free_page(vm_data->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&vm_data->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}

static int avic_vm_init(struct kvm *kvm)
{
	unsigned long flags;
	int vm_id, err = -ENOMEM;
	struct kvm_arch *vm_data = &kvm->arch;
	struct page *p_page;
	struct page *l_page;

	if (!avic)
		return 0;

	vm_id = avic_get_next_vm_id();
	if (vm_id < 0)
		return vm_id;
	vm_data->avic_vm_id = (u32)vm_id;

	/* Allocating physical APIC ID table (4KB) */
	p_page = alloc_page(GFP_KERNEL);
	if (!p_page)
		goto free_avic;

	vm_data->avic_physical_id_table_page = p_page;
	clear_page(page_address(p_page));

	/* Allocating logical APIC ID table (4KB) */
	l_page = alloc_page(GFP_KERNEL);
	if (!l_page)
		goto free_avic;

	vm_data->avic_logical_id_table_page = l_page;
	clear_page(page_address(l_page));

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	return 0;

free_avic:
	avic_vm_destroy(kvm);
	return err;
}

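/*
 * Walk this vcpu's ir_list and update every IOMMU interrupt remapping
 * table entry that targets it, so device interrupts follow the vcpu to
 * its new physical CPU (or are suspended while it is not running).
 */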
static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vcpu ir_list to update all existing
	 * interrupt remapping table entries targeting this vcpu.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		ret = amd_iommu_update_ga(cpu, r, ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}

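/*
 * On vcpu load, point the vcpu's physical APIC ID table entry at the
 * new host CPU and set the is_running bit when the vcpu is active, so
 * the hardware knows where to deliver AVIC doorbells.
 */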
static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	u64 entry;
	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
	int h_physical_id = kvm_cpu_get_apicid(cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	if (svm->avic_is_running)
		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
					svm->avic_is_running);
}

static void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
	u64 entry;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
		avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}

/*
 * This function is called during VCPU halt/unhalt.
 */
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->avic_is_running = is_run;
	if (is_run)
		avic_vcpu_load(vcpu, vcpu->cpu);
	else
		avic_vcpu_put(vcpu);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	if (!init_event) {
		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
					   MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
			svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}
	init_vmcb(svm);

	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);

	if (kvm_vcpu_apicv_active(vcpu) && !init_event)
		avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}

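/*
 * Allocate and initialize a new vcpu: the VMCB page, the MSR permission
 * maps for the vcpu itself and for the nested guest, the host save area
 * and, when AVIC is enabled, the AVIC backing page and IR list.
 */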
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		goto uninit;

	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto free_page1;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto free_page2;

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto free_page3;

	if (avic) {
		err = avic_init_backing_page(&svm->vcpu);
		if (err)
			goto free_page4;

		INIT_LIST_HEAD(&svm->ir_list);
		spin_lock_init(&svm->ir_list_lock);
	}

	/*
	 * We initialize this flag to true to make sure that the is_running
	 * bit is set the first time the vcpu is loaded.
	 */
	svm->avic_is_running = true;

	svm->nested.hsave = page_address(hsave_page);

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->nested.msrpm = page_address(nested_msrpm_pages);
	svm_vcpu_init_msrpm(svm->nested.msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	svm_init_osvw(&svm->vcpu);

	return &svm->vcpu;

free_page4:
	__free_page(hsave_page);
free_page3:
	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

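/*
 * Called whenever a vcpu is (re)loaded on a physical CPU: save the host
 * segment and MSR state that VMRUN clobbers, and reprogram the per-CPU
 * TSC ratio MSR when this vcpu uses a different scaling ratio.
 */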
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		svm->asid_generation = 0;
		mark_all_dirty(svm->vmcb);
	}

#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
	savesegment(fs, svm->host.fs);
	savesegment(gs, svm->host.gs);
	svm->host.ldt = kvm_read_ldt();

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
			__this_cpu_write(current_tsc_ratio, tsc_ratio);
			wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
		}
	}
	/* This assumes that the kernel never uses MSR_TSC_AUX */
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);

	avic_vcpu_load(vcpu, cpu);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	avic_vcpu_put(vcpu);

	++vcpu->stat.host_state_reload;
	kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
	loadsegment(fs, svm->host.fs);
	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
	load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
	loadsegment(gs, svm->host.gs);
#endif
#endif
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	avic_set_running(vcpu, false);
}

static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	avic_set_running(vcpu, true);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
	 * (caused by either a task switch or an inter-privilege IRET),
	 * so we do not need to update the CPL here.
	 */
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_EXREG_PDPTR:
		BUG_ON(!npt_enabled);
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		break;
	default:
		BUG();
	}
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	/*
	 * AMD CPUs circa 2014 track the G bit for all segments except CS.
	 * However, the SVM spec states that the G bit is not observed by the
	 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
	 * So let's synthesize a legal G bit for all segments, this helps
	 * running KVM nested. It also helps cross-vendor migration, because
	 * Intel's vmentry has a check on the 'G' bit.
	 */
	var->g = s->limit > 0xfffff;

	/*
	 * AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	case VCPU_SREG_SS:
		/*
		 * On AMD CPUs sometimes the DB bit in the segment
		 * descriptor is left as 1, although the whole segment has
		 * been made unusable. Clear it here to pass an Intel VMX
		 * entry check when cross vendor migrating.
		 */
		if (var->unusable)
			var->db = 0;
		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
	mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void update_cr0_intercept(struct vcpu_svm *svm)
{
	ulong gcr0 = svm->vcpu.arch.cr0;
	u64 *hcr0 = &svm->vmcb->save.cr0;

	*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
		| (gcr0 & SVM_CR0_SELECTIVE_MASK);

	mark_dirty(svm->vmcb, VMCB_CR);

	if (gcr0 == *hcr0) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	vcpu->arch.cr0 = cr0;

	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
	mark_dirty(svm->vmcb, VMCB_CR);
	update_cr0_intercept(svm);
}

static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		svm_flush_tlb(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
	return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}

	/*
	 * This is always accurate, except if SYSRET returned to a segment
	 * with SS.DPL != 3. Intel does not have this quirk, and always
	 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
	 * would entail passing the CPL to userspace and back.
	 */
	if (seg == VCPU_SREG_SS)
		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;

	mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_bp_intercept(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	clr_exception_intercept(svm, BP_VECTOR);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			set_exception_intercept(svm, BP_VECTOR);
	} else
		vcpu->guest_debug = 0;
}

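/*
 * Assign the vcpu a fresh ASID from this CPU's pool; when the pool is
 * exhausted, bump the generation, restart at ASID 1 and request a full
 * TLB flush on the next VMRUN.
 */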
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}

static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.dr6;
}

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr6 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	vcpu->arch.dr6 = svm_get_dr6(vcpu);
	vcpu->arch.dr7 = svm->vmcb->save.dr7;

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.dr7 = value;
	mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct vcpu_svm *svm)
{
	u64 fault_address = svm->vmcb->control.exit_info_2;
	u64 error_code;
	int r = 1;

	switch (svm->apf_reason) {
	default:
		error_code = svm->vmcb->control.exit_info_1;

		trace_kvm_page_fault(fault_address, error_code);
		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
			svm->vmcb->control.insn_bytes,
			svm->vmcb->control.insn_len);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wait(fault_address);
		local_irq_enable();
		break;
	case KVM_PV_REASON_PAGE_READY:
		svm->apf_reason = 0;
		local_irq_disable();
		kvm_async_pf_task_wake(fault_address);
		local_irq_enable();
		break;
	}
	return r;
}

static int db_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
		!svm->nmi_singlestep) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}

	if (svm->nmi_singlestep) {
		svm->nmi_singlestep = false;
		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
			svm->vmcb->save.rflags &=
				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
	}

	if (svm->vcpu.guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;
		return 0;
	}

	return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
	int er;

	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int ac_interception(struct vcpu_svm *svm)
{
	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
	return 1;
}

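/*
 * Detect the machine-check signature of AMD erratum 383 (MC0 status
 * 0xb600000000010015, with bit 62 ignored). When it matches, clear the
 * MCi_STATUS registers and flush the TLB to evict multi-match entries.
 */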
static bool is_erratum_383(void)
{
	int err, i;
	u64 value;

	if (!erratum_383_found)
		return false;

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
	if (err)
		return false;

	/* Bit 62 may or may not be set for this mce */
	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)
		return false;

	/* Clear MCi_STATUS registers */
	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
	if (!err) {
		u32 low, high;

		value &= ~(1ULL << 2);
		low = lower_32_bits(value);
		high = upper_32_bits(value);

		native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
	}

	/* Flush tlb to evict multi-match entries */
	__flush_tlb_all();

	return true;
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
	if (is_erratum_383()) {
		/*
		 * Erratum 383 triggered. Guest state is corrupt so kill the
		 * guest.
		 */
		pr_err("KVM: Guest triggered AMD Erratum 383\n");

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

		return;
	}

	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return;
}

static int mc_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
	struct kvm_run *kvm_run = svm->vcpu.run;

	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	if (string)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	svm->next_rip = svm->vmcb->control.exit_info_2;
	skip_emulated_instruction(&svm->vcpu);

	return in ? kvm_fast_pio_in(vcpu, size, port)
		  : kvm_fast_pio_out(vcpu, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
	++svm->vcpu.stat.irq_exits;
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	return kvm_emulate_hypercall(&svm->vcpu);
}

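/*
 * Nested paging helpers: while L2 runs, the MMU walks the nested page
 * table that L1 set up, so expose its CR3 and PDPTEs here.
 */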
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return svm->nested.nested_cr3;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr3 = svm->nested.nested_cr3;
	u64 pdpte;
	int ret;

	ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
				       offset_in_page(cr3) + index * 8, 8);
	if (ret)
		return 0;
	return pdpte;
}

static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
				   unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = root;
	mark_dirty(svm->vmcb, VMCB_NPT);
	svm_flush_tlb(vcpu);
}

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
				       struct x86_exception *fault)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
		/*
		 * TODO: track the cause of the nested page fault, and
		 * correctly fill in the high bits of exit_info_1.
		 */
		svm->vmcb->control.exit_code = SVM_EXIT_NPF;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = (1ULL << 32);
		svm->vmcb->control.exit_info_2 = fault->address;
	}

	svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
	svm->vmcb->control.exit_info_1 |= fault->error_code;

	/*
	 * The present bit is always zero for page structure faults on real
	 * hardware.
	 */
	if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
		svm->vmcb->control.exit_info_1 &= ~1;

	nested_svm_vmexit(svm);
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));
	kvm_init_shadow_mmu(vcpu);
	vcpu->arch.mmu.set_cr3           = nested_svm_set_tdp_cr3;
	vcpu->arch.mmu.get_cr3           = nested_svm_get_tdp_cr3;
	vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
	vcpu->arch.mmu.shadow_root_level = get_npt_level();
	reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

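/*
 * Check whether an exception raised while L2 is running is intercepted
 * by L1; if so, flag that a nested #vmexit is required instead of
 * injecting the exception directly.
 */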
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	int vmexit;

	if (!is_guest_mode(&svm->vcpu))
		return 0;

	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
	svm->vmcb->control.exit_code_hi = 0;
	svm->vmcb->control.exit_info_1 = error_code;
	svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

	vmexit = nested_svm_intercept(svm);
	if (vmexit == NESTED_EXIT_DONE)
		svm->nested.exit_required = true;

	return vmexit;
}

/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
		return true;

	if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
		return false;

	/*
	 * if vmexit was already requested (by intercepted exception
	 * for instance) do not overwrite it with "external interrupt"
	 * vmexit.
	 */
	if (svm->nested.exit_required)
		return false;

	svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;

	if (svm->nested.intercept & 1ULL) {
		/*
		 * The #vmexit can't be emulated here directly because this
		 * code path runs with irqs and preemption disabled. A
		 * #vmexit emulation might sleep. Only signal request for
		 * the #vmexit here.
		 */
		svm->nested.exit_required = true;
		trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
		return false;
	}

	return true;
}

/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
	if (!is_guest_mode(&svm->vcpu))
		return true;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
		return true;

	svm->vmcb->control.exit_code = SVM_EXIT_NMI;
	svm->nested.exit_required = true;

	return false;
}

static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
	struct page *page;

	might_sleep();

	page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		goto error;

	*_page = page;

	return kmap(page);

error:
	kvm_inject_gp(&svm->vcpu, 0);

	return NULL;
}

static void nested_svm_unmap(struct page *page)
{
	kunmap(page);
	kvm_release_page_dirty(page);
}

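/*
 * Consult L1's I/O permission bitmap to decide whether an intercepted
 * I/O access of the nested guest must be handled by L1.
 */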
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
	unsigned port, size, iopm_len;
	u16 val, mask;
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
	size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
		SVM_IOIO_SIZE_SHIFT;
	gpa  = svm->nested.vmcb_iopm + (port / 8);
	start_bit = port % 8;
	iopm_len = (start_bit + size > 8) ? 2 : 1;
	mask = (0xf >> (4 - size)) << start_bit;
	val = 0;

	if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
		return NESTED_EXIT_DONE;

	return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

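/*
 * Consult L1's MSR permission map to decide whether an intercepted
 * MSR access of the nested guest must be handled by L1.
 */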
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	offset = svm_msrpm_offset(msr);
	write  = svm->vmcb->control.exit_info_1 & 1;
	mask   = 1 << ((2 * (msr & 0xf)) + write);

	if (offset == MSR_INVALID)
		return NESTED_EXIT_DONE;

	/* Offset is in 32 bit units but we need it in 8 bit units */
	offset *= 4;

	if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
		return NESTED_EXIT_DONE;

	return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

static int nested_svm_exit_special(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	switch (exit_code) {
	case SVM_EXIT_INTR:
	case SVM_EXIT_NMI:
	case SVM_EXIT_EXCP_BASE + MC_VECTOR:
		return NESTED_EXIT_HOST;
	case SVM_EXIT_NPF:
		/* For now we are always handling NPFs when using them */
		if (npt_enabled)
			return NESTED_EXIT_HOST;
		break;
	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
		/* When we're shadowing, trap PFs, but not async PF */
		if (!npt_enabled && svm->apf_reason == 0)
			return NESTED_EXIT_HOST;
		break;
	default:
		break;
	}

	return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;
	int vmexit = NESTED_EXIT_HOST;

	switch (exit_code) {
	case SVM_EXIT_MSR:
		vmexit = nested_svm_exit_handled_msr(svm);
		break;
	case SVM_EXIT_IOIO:
		vmexit = nested_svm_intercept_ioio(svm);
		break;
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
		if (svm->nested.intercept_cr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
		if (svm->nested.intercept_dr & bit)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (svm->nested.intercept_exceptions & excp_bits)
			vmexit = NESTED_EXIT_DONE;
		/* async page fault always causes vmexit */
		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
			 svm->apf_reason != 0)
			vmexit = NESTED_EXIT_DONE;
		break;
	}
	case SVM_EXIT_ERR: {
		vmexit = NESTED_EXIT_DONE;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.intercept & exit_bits)
			vmexit = NESTED_EXIT_DONE;
	}
	}

	return vmexit;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
	int vmexit;

	vmexit = nested_svm_intercept(svm);

	if (vmexit == NESTED_EXIT_DONE)
		nested_svm_vmexit(svm);

	return vmexit;
}

static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
	struct vmcb_control_area *dst  = &dst_vmcb->control;
	struct vmcb_control_area *from = &from_vmcb->control;

	dst->intercept_cr         = from->intercept_cr;
	dst->intercept_dr         = from->intercept_dr;
	dst->intercept_exceptions = from->intercept_exceptions;
	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
	dst->asid                 = from->asid;
	dst->tlb_ctl              = from->tlb_ctl;
	dst->int_ctl              = from->int_ctl;
	dst->int_vector           = from->int_vector;
	dst->int_state            = from->int_state;
	dst->exit_code            = from->exit_code;
	dst->exit_code_hi         = from->exit_code_hi;
	dst->exit_info_1          = from->exit_info_1;
	dst->exit_info_2          = from->exit_info_2;
	dst->exit_int_info        = from->exit_int_info;
	dst->exit_int_info_err    = from->exit_int_info_err;
	dst->nested_ctl           = from->nested_ctl;
	dst->event_inj            = from->event_inj;
	dst->event_inj_err        = from->event_inj_err;
	dst->nested_cr3           = from->nested_cr3;
	dst->lbr_ctl              = from->lbr_ctl;
}

Joerg Roedel34f80cf2009-08-07 11:49:38 +02002650static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002651{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002652 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002653 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02002654 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002655 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01002656
Joerg Roedel17897f32009-10-09 16:08:29 +02002657 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
2658 vmcb->control.exit_info_1,
2659 vmcb->control.exit_info_2,
2660 vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01002661 vmcb->control.exit_int_info_err,
2662 KVM_ISA_SVM);
Joerg Roedel17897f32009-10-09 16:08:29 +02002663
Joerg Roedel7597f122010-02-19 16:23:00 +01002664 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002665 if (!nested_vmcb)
2666 return 1;
2667
Joerg Roedel20307532010-11-29 17:51:48 +01002668 /* Exit Guest-Mode */
2669 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01002670 svm->nested.vmcb = 0;
2671
Alexander Grafcf74a782008-11-25 20:17:08 +01002672 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02002673 disable_gif(svm);
2674
2675 nested_vmcb->save.es = vmcb->save.es;
2676 nested_vmcb->save.cs = vmcb->save.cs;
2677 nested_vmcb->save.ss = vmcb->save.ss;
2678 nested_vmcb->save.ds = vmcb->save.ds;
2679 nested_vmcb->save.gdtr = vmcb->save.gdtr;
2680 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02002681 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002682 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002683 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002684 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01002685 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002686 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02002687 nested_vmcb->save.rip = vmcb->save.rip;
2688 nested_vmcb->save.rsp = vmcb->save.rsp;
2689 nested_vmcb->save.rax = vmcb->save.rax;
2690 nested_vmcb->save.dr7 = vmcb->save.dr7;
2691 nested_vmcb->save.dr6 = vmcb->save.dr6;
2692 nested_vmcb->save.cpl = vmcb->save.cpl;
2693
2694 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
2695 nested_vmcb->control.int_vector = vmcb->control.int_vector;
2696 nested_vmcb->control.int_state = vmcb->control.int_state;
2697 nested_vmcb->control.exit_code = vmcb->control.exit_code;
2698 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
2699 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
2700 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
2701 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
2702 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02002703
2704 if (svm->nrips_enabled)
2705 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02002706
2707 /*
2708 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
2709 * to make sure that we do not lose injected events. So check event_inj
2710 * here and copy it to exit_int_info if it is valid.
 2711	 * Exit_int_info and event_inj can't both be valid because the case
2712 * below only happens on a VMRUN instruction intercept which has
2713 * no valid exit_int_info set.
2714 */
2715 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2716 struct vmcb_control_area *nc = &nested_vmcb->control;
2717
2718 nc->exit_int_info = vmcb->control.event_inj;
2719 nc->exit_int_info_err = vmcb->control.event_inj_err;
2720 }
2721
Joerg Roedel33740e42009-08-07 11:49:29 +02002722 nested_vmcb->control.tlb_ctl = 0;
2723 nested_vmcb->control.event_inj = 0;
2724 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002725
2726 /* We always set V_INTR_MASKING and remember the old value in hflags */
2727 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2728 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2729
Alexander Grafcf74a782008-11-25 20:17:08 +01002730 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02002731 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01002732
Alexander Graf219b65d2009-06-15 15:21:25 +02002733 kvm_clear_exception_queue(&svm->vcpu);
2734 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002735
Joerg Roedel4b161842010-09-10 17:31:03 +02002736 svm->nested.nested_cr3 = 0;
2737
Alexander Grafcf74a782008-11-25 20:17:08 +01002738 /* Restore selected save entries */
2739 svm->vmcb->save.es = hsave->save.es;
2740 svm->vmcb->save.cs = hsave->save.cs;
2741 svm->vmcb->save.ss = hsave->save.ss;
2742 svm->vmcb->save.ds = hsave->save.ds;
2743 svm->vmcb->save.gdtr = hsave->save.gdtr;
2744 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002745 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01002746 svm_set_efer(&svm->vcpu, hsave->save.efer);
2747 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2748 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2749 if (npt_enabled) {
2750 svm->vmcb->save.cr3 = hsave->save.cr3;
2751 svm->vcpu.arch.cr3 = hsave->save.cr3;
2752 } else {
Avi Kivity23902182010-06-10 17:02:16 +03002753 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01002754 }
2755 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2756 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2757 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2758 svm->vmcb->save.dr7 = 0;
2759 svm->vmcb->save.cpl = 0;
2760 svm->vmcb->control.exit_int_info = 0;
2761
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002762 mark_all_dirty(svm->vmcb);
2763
Joerg Roedel7597f122010-02-19 16:23:00 +01002764 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01002765
Joerg Roedel4b161842010-09-10 17:31:03 +02002766 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01002767 kvm_mmu_reset_context(&svm->vcpu);
2768 kvm_mmu_load(&svm->vcpu);
2769
2770 return 0;
2771}
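/*
 * Illustrative sketch, not part of the driver: decoding the event_inj
 * word that nested_svm_vmexit() copies into exit_int_info above,
 * assuming the SVM_EVTINJ_* masks from <asm/svm.h>. For example,
 * 0x8000030e is a valid exception injection of vector 14 (#PF).
 */
static inline void example_decode_evtinj(u32 event_inj)
{
	u8 vector = event_inj & SVM_EVTINJ_VEC_MASK;	/* bits 7:0  */
	u32 type = event_inj & SVM_EVTINJ_TYPE_MASK;	/* bits 10:8 */
	bool valid = event_inj & SVM_EVTINJ_VALID;	/* bit 31    */

	(void)vector; (void)type; (void)valid;
}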
Alexander Graf3d6368e2008-11-25 20:17:07 +01002772
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002773static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002774{
Joerg Roedel323c3d82010-03-01 15:34:37 +01002775 /*
2776 * This function merges the msr permission bitmaps of kvm and the
Guo Chaoc5ec2e52012-06-28 15:16:43 +08002777	 * nested vmcb. It is optimized to merge only the parts where
Joerg Roedel323c3d82010-03-01 15:34:37 +01002778	 * the kvm msr permission bitmap may contain zero bits.
2779 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01002780 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002781
Joerg Roedel323c3d82010-03-01 15:34:37 +01002782 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2783 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002784
Joerg Roedel323c3d82010-03-01 15:34:37 +01002785 for (i = 0; i < MSRPM_OFFSETS; i++) {
2786 u32 value, p;
2787 u64 offset;
2788
2789 if (msrpm_offsets[i] == 0xffffffff)
2790 break;
2791
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002792 p = msrpm_offsets[i];
2793 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01002794
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002795 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
Joerg Roedel323c3d82010-03-01 15:34:37 +01002796 return false;
2797
2798 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2799 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002800
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002801 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002802
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002803 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002804}
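/*
 * Illustrative sketch, not part of the driver: the OR-merge performed
 * above. In the MSR permission bitmap a set bit means "intercept", so
 * OR-ing KVM's word with the nested guest's word intercepts an access
 * whenever either KVM or the L1 hypervisor asks for it.
 */
static inline u32 example_merge_msrpm_word(u32 kvm_word, u32 nested_word)
{
	return kvm_word | nested_word;	/* intercept if either side wants it */
}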
2805
Joerg Roedel52c65a302010-08-02 16:46:44 +02002806static bool nested_vmcb_checks(struct vmcb *vmcb)
2807{
2808 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2809 return false;
2810
Joerg Roedeldbe77582010-08-02 16:46:45 +02002811 if (vmcb->control.asid == 0)
2812 return false;
2813
Joerg Roedel4b161842010-09-10 17:31:03 +02002814 if (vmcb->control.nested_ctl && !npt_enabled)
2815 return false;
2816
Joerg Roedel52c65a302010-08-02 16:46:44 +02002817 return true;
2818}
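/*
 * Illustrative note, not part of the driver: the first two checks above
 * mirror VMRUN consistency checks from the AMD APM (the VMRUN intercept
 * must be set, and ASID 0 is reserved for the host); the nested_ctl
 * check is KVM's own constraint that nested paging in the nested VMCB
 * is only usable when the host itself runs with NPT enabled.
 */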
2819
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002820static bool nested_svm_vmrun(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002821{
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002822 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002823 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedeldefbba52009-08-07 11:49:30 +02002824 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002825 struct page *page;
Joerg Roedel06fc77722010-02-19 16:23:07 +01002826 u64 vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002827
Joerg Roedel06fc77722010-02-19 16:23:07 +01002828 vmcb_gpa = svm->vmcb->save.rax;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002829
Joerg Roedel7597f122010-02-19 16:23:00 +01002830 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002831 if (!nested_vmcb)
2832 return false;
2833
Joerg Roedel52c65a302010-08-02 16:46:44 +02002834 if (!nested_vmcb_checks(nested_vmcb)) {
2835 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2836 nested_vmcb->control.exit_code_hi = 0;
2837 nested_vmcb->control.exit_info_1 = 0;
2838 nested_vmcb->control.exit_info_2 = 0;
2839
2840 nested_svm_unmap(page);
2841
2842 return false;
2843 }
2844
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002845 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
Joerg Roedel0ac406d2009-10-09 16:08:27 +02002846 nested_vmcb->save.rip,
2847 nested_vmcb->control.int_ctl,
2848 nested_vmcb->control.event_inj,
2849 nested_vmcb->control.nested_ctl);
2850
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002851 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2852 nested_vmcb->control.intercept_cr >> 16,
Joerg Roedel2e554e82010-02-24 18:59:14 +01002853 nested_vmcb->control.intercept_exceptions,
2854 nested_vmcb->control.intercept);
2855
Alexander Graf3d6368e2008-11-25 20:17:07 +01002856 /* Clear internal status */
Alexander Graf219b65d2009-06-15 15:21:25 +02002857 kvm_clear_exception_queue(&svm->vcpu);
2858 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002859
Joerg Roedele0231712010-02-24 18:59:10 +01002860 /*
2861 * Save the old vmcb, so we don't need to pick what we save, but can
2862 * restore everything when a VMEXIT occurs
2863 */
Joerg Roedeldefbba52009-08-07 11:49:30 +02002864 hsave->save.es = vmcb->save.es;
2865 hsave->save.cs = vmcb->save.cs;
2866 hsave->save.ss = vmcb->save.ss;
2867 hsave->save.ds = vmcb->save.ds;
2868 hsave->save.gdtr = vmcb->save.gdtr;
2869 hsave->save.idtr = vmcb->save.idtr;
Avi Kivityf6801df2010-01-21 15:31:50 +02002870 hsave->save.efer = svm->vcpu.arch.efer;
Avi Kivity4d4ec082009-12-29 18:07:30 +02002871 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002872 hsave->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03002873 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02002874 hsave->save.rip = kvm_rip_read(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002875 hsave->save.rsp = vmcb->save.rsp;
2876 hsave->save.rax = vmcb->save.rax;
2877 if (npt_enabled)
2878 hsave->save.cr3 = vmcb->save.cr3;
2879 else
Avi Kivity9f8fe502010-12-05 17:30:00 +02002880 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedeldefbba52009-08-07 11:49:30 +02002881
Joerg Roedel0460a972009-08-07 11:49:31 +02002882 copy_vmcb_control_area(hsave, vmcb);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002883
Avi Kivityf6e78472010-08-02 15:30:20 +03002884 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01002885 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2886 else
2887 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2888
Joerg Roedel4b161842010-09-10 17:31:03 +02002889 if (nested_vmcb->control.nested_ctl) {
2890 kvm_mmu_unload(&svm->vcpu);
2891 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2892 nested_svm_init_mmu_context(&svm->vcpu);
2893 }
2894
Alexander Graf3d6368e2008-11-25 20:17:07 +01002895 /* Load the nested guest state */
2896 svm->vmcb->save.es = nested_vmcb->save.es;
2897 svm->vmcb->save.cs = nested_vmcb->save.cs;
2898 svm->vmcb->save.ss = nested_vmcb->save.ss;
2899 svm->vmcb->save.ds = nested_vmcb->save.ds;
2900 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2901 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03002902 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002903 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2904 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2905 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2906 if (npt_enabled) {
2907 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2908 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002909 } else
Avi Kivity23902182010-06-10 17:02:16 +03002910 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01002911
2912 /* Guest paging mode is active - reset mmu */
2913 kvm_mmu_reset_context(&svm->vcpu);
2914
Joerg Roedeldefbba52009-08-07 11:49:30 +02002915 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002916 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2917 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2918 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01002919
Alexander Graf3d6368e2008-11-25 20:17:07 +01002920 /* In case we don't even reach vcpu_run, the fields are not updated */
2921 svm->vmcb->save.rax = nested_vmcb->save.rax;
2922 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2923 svm->vmcb->save.rip = nested_vmcb->save.rip;
2924 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2925 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2926 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2927
Joerg Roedelf7138532010-03-01 15:34:40 +01002928 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002929 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002930
Joerg Roedelaad42c62009-08-07 11:49:34 +02002931 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002932 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01002933 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02002934 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2935 svm->nested.intercept = nested_vmcb->control.intercept;
2936
Joerg Roedelf40f6a42010-12-03 15:25:15 +01002937 svm_flush_tlb(&svm->vcpu);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002938 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002939 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2940 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2941 else
2942 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2943
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002944 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2945 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002946 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2947 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002948 }
2949
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002950 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002951 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02002952
Joerg Roedel88ab24a2010-02-19 16:23:06 +01002953 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002954 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2955 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2956 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002957 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2958 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2959
Joerg Roedel7597f122010-02-19 16:23:00 +01002960 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002961
Joerg Roedel20307532010-11-29 17:51:48 +01002962 /* Enter Guest-Mode */
2963 enter_guest_mode(&svm->vcpu);
2964
Joerg Roedel384c6362010-11-30 18:03:56 +01002965 /*
2966 * Merge guest and host intercepts - must be called with vcpu in
 2967	 * guest-mode to take effect here
2968 */
2969 recalc_intercepts(svm);
2970
Joerg Roedel06fc77722010-02-19 16:23:07 +01002971 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002972
Joerg Roedel2af91942009-08-07 11:49:28 +02002973 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002974
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002975 mark_all_dirty(svm->vmcb);
2976
Joerg Roedel9738b2c2009-08-07 11:49:41 +02002977 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01002978}
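/*
 * Illustrative sketch, not part of the driver: the int_ctl handling in
 * nested_svm_vmrun() above. V_INTR_MASKING is forced on while the nested
 * guest runs; whether L1 requested it is remembered in HF_VINTR_MASK so
 * that nested_svm_vmexit() can reconstruct L1's view of int_ctl.
 */
static inline u64 example_nested_int_ctl(u64 l1_int_ctl)
{
	return l1_int_ctl | V_INTR_MASKING_MASK;	/* always set for L2 */
}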
2979
Joerg Roedel9966bf62009-08-07 11:49:40 +02002980static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01002981{
2982 to_vmcb->save.fs = from_vmcb->save.fs;
2983 to_vmcb->save.gs = from_vmcb->save.gs;
2984 to_vmcb->save.tr = from_vmcb->save.tr;
2985 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2986 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2987 to_vmcb->save.star = from_vmcb->save.star;
2988 to_vmcb->save.lstar = from_vmcb->save.lstar;
2989 to_vmcb->save.cstar = from_vmcb->save.cstar;
2990 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2991 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2992 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2993 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01002994}
2995
Avi Kivity851ba692009-08-24 11:10:17 +03002996static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01002997{
Joerg Roedel9966bf62009-08-07 11:49:40 +02002998 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01002999 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003000
Alexander Graf55426752008-11-25 20:17:06 +01003001 if (nested_svm_check_permissions(svm))
3002 return 1;
3003
Joerg Roedel7597f122010-02-19 16:23:00 +01003004 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003005 if (!nested_vmcb)
3006 return 1;
3007
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003008 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3009 skip_emulated_instruction(&svm->vcpu);
3010
Joerg Roedel9966bf62009-08-07 11:49:40 +02003011 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003012 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003013
3014 return 1;
3015}
3016
Avi Kivity851ba692009-08-24 11:10:17 +03003017static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003018{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003019 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003020 struct page *page;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003021
Alexander Graf55426752008-11-25 20:17:06 +01003022 if (nested_svm_check_permissions(svm))
3023 return 1;
3024
Joerg Roedel7597f122010-02-19 16:23:00 +01003025 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003026 if (!nested_vmcb)
3027 return 1;
3028
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003029 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3030 skip_emulated_instruction(&svm->vcpu);
3031
Joerg Roedel9966bf62009-08-07 11:49:40 +02003032 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003033 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003034
3035 return 1;
3036}
3037
Avi Kivity851ba692009-08-24 11:10:17 +03003038static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003039{
Alexander Graf3d6368e2008-11-25 20:17:07 +01003040 if (nested_svm_check_permissions(svm))
3041 return 1;
3042
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02003043 /* Save rip after vmrun instruction */
3044 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003045
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003046 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01003047 return 1;
3048
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003049 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02003050 goto failed;
3051
3052 return 1;
3053
3054failed:
3055
3056 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3057 svm->vmcb->control.exit_code_hi = 0;
3058 svm->vmcb->control.exit_info_1 = 0;
3059 svm->vmcb->control.exit_info_2 = 0;
3060
3061 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003062
3063 return 1;
3064}
3065
Avi Kivity851ba692009-08-24 11:10:17 +03003066static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003067{
3068 if (nested_svm_check_permissions(svm))
3069 return 1;
3070
3071 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3072 skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003073 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003074
Joerg Roedel2af91942009-08-07 11:49:28 +02003075 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003076
3077 return 1;
3078}
3079
Avi Kivity851ba692009-08-24 11:10:17 +03003080static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003081{
3082 if (nested_svm_check_permissions(svm))
3083 return 1;
3084
3085 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3086 skip_emulated_instruction(&svm->vcpu);
3087
Joerg Roedel2af91942009-08-07 11:49:28 +02003088 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003089
3090 /* After a CLGI no interrupts should come */
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05003091 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3092 svm_clear_vintr(svm);
3093 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3094 mark_dirty(svm->vmcb, VMCB_INTR);
3095 }
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003096
Alexander Graf1371d902008-11-25 20:17:04 +01003097 return 1;
3098}
3099
Avi Kivity851ba692009-08-24 11:10:17 +03003100static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02003101{
3102 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02003103
David Kaplan668f1982015-02-20 16:02:10 -06003104 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3105 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedelec1ff792009-10-09 16:08:31 +02003106
Alexander Grafff092382009-06-15 15:21:24 +02003107 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
David Kaplan668f1982015-02-20 16:02:10 -06003108 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Alexander Grafff092382009-06-15 15:21:24 +02003109
3110 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3111 skip_emulated_instruction(&svm->vcpu);
3112 return 1;
3113}
3114
Joerg Roedel532a46b2009-10-09 16:08:32 +02003115static int skinit_interception(struct vcpu_svm *svm)
3116{
David Kaplan668f1982015-02-20 16:02:10 -06003117 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedel532a46b2009-10-09 16:08:32 +02003118
3119 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3120 return 1;
3121}
3122
David Kaplandab429a2015-03-02 13:43:37 -06003123static int wbinvd_interception(struct vcpu_svm *svm)
3124{
Kyle Huey6affcbe2016-11-29 12:40:40 -08003125 return kvm_emulate_wbinvd(&svm->vcpu);
David Kaplandab429a2015-03-02 13:43:37 -06003126}
3127
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003128static int xsetbv_interception(struct vcpu_svm *svm)
3129{
3130 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3131 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3132
3133 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3134 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
3135 skip_emulated_instruction(&svm->vcpu);
3136 }
3137
3138 return 1;
3139}
3140
Avi Kivity851ba692009-08-24 11:10:17 +03003141static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003142{
Izik Eidus37817f22008-03-24 23:14:53 +02003143 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003144 int reason;
3145 int int_type = svm->vmcb->control.exit_int_info &
3146 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03003147 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003148 uint32_t type =
3149 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3150 uint32_t idt_v =
3151 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02003152 bool has_error_code = false;
3153 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02003154
3155 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003156
Izik Eidus37817f22008-03-24 23:14:53 +02003157 if (svm->vmcb->control.exit_info_2 &
3158 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003159 reason = TASK_SWITCH_IRET;
3160 else if (svm->vmcb->control.exit_info_2 &
3161 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3162 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003163 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003164 reason = TASK_SWITCH_GATE;
3165 else
3166 reason = TASK_SWITCH_CALL;
3167
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003168 if (reason == TASK_SWITCH_GATE) {
3169 switch (type) {
3170 case SVM_EXITINTINFO_TYPE_NMI:
3171 svm->vcpu.arch.nmi_injected = false;
3172 break;
3173 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02003174 if (svm->vmcb->control.exit_info_2 &
3175 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3176 has_error_code = true;
3177 error_code =
3178 (u32)svm->vmcb->control.exit_info_2;
3179 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003180 kvm_clear_exception_queue(&svm->vcpu);
3181 break;
3182 case SVM_EXITINTINFO_TYPE_INTR:
3183 kvm_clear_interrupt_queue(&svm->vcpu);
3184 break;
3185 default:
3186 break;
3187 }
3188 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003189
Gleb Natapov8317c292009-04-12 13:37:02 +03003190 if (reason != TASK_SWITCH_GATE ||
3191 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3192 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03003193 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3194 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003195
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01003196 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3197 int_vec = -1;
3198
3199 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
Gleb Natapovacb54512010-04-15 21:03:50 +03003200 has_error_code, error_code) == EMULATE_FAIL) {
3201 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3202 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3203 svm->vcpu.run->internal.ndata = 0;
3204 return 0;
3205 }
3206 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003207}
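/*
 * Illustrative note, not part of the driver: the task-switch reason above
 * is derived from exit_info_2 flag bits (the SVM_EXITINFOSHIFT_TS_REASON_*
 * IRET and JMP bits); if neither is set but a valid IDT vectoring event is
 * pending (idt_v), the switch came through a task gate (TASK_SWITCH_GATE),
 * otherwise a CALL-style task switch is assumed.
 */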
3208
Avi Kivity851ba692009-08-24 11:10:17 +03003209static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003210{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003211 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Kyle Huey6a908b62016-11-29 12:40:37 -08003212 return kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003213}
3214
Avi Kivity851ba692009-08-24 11:10:17 +03003215static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003216{
3217 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003218 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03003219 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003220 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Radim Krčmářf303b4c2014-01-17 20:52:42 +01003221 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003222 return 1;
3223}
3224
Avi Kivity851ba692009-08-24 11:10:17 +03003225static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03003226{
Andre Przywaradf4f31082010-12-21 11:12:06 +01003227 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3228 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3229
3230 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3231 skip_emulated_instruction(&svm->vcpu);
3232 return 1;
Marcelo Tosattia7052892008-09-23 13:18:35 -03003233}
3234
Avi Kivity851ba692009-08-24 11:10:17 +03003235static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003236{
Andre Przywara51d8b662010-12-21 11:12:02 +01003237 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003238}
3239
Avi Kivity332b56e2011-11-10 14:57:24 +02003240static int rdpmc_interception(struct vcpu_svm *svm)
3241{
3242 int err;
3243
3244 if (!static_cpu_has(X86_FEATURE_NRIPS))
3245 return emulate_on_interception(svm);
3246
3247 err = kvm_rdpmc(&svm->vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08003248 return kvm_complete_insn_gp(&svm->vcpu, err);
Avi Kivity332b56e2011-11-10 14:57:24 +02003249}
3250
Xiubo Li52eb5a62015-03-13 17:39:45 +08003251static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3252 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02003253{
3254 unsigned long cr0 = svm->vcpu.arch.cr0;
3255 bool ret = false;
3256 u64 intercept;
3257
3258 intercept = svm->nested.intercept;
3259
3260 if (!is_guest_mode(&svm->vcpu) ||
3261 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3262 return false;
3263
3264 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3265 val &= ~SVM_CR0_SELECTIVE_MASK;
3266
3267 if (cr0 ^ val) {
3268 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3269 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3270 }
3271
3272 return ret;
3273}
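/*
 * Illustrative sketch, not part of the driver: the selective-CR0 test
 * above, assuming SVM_CR0_SELECTIVE_MASK covers the CR0 bits (TS and MP)
 * whose changes do not trigger a CR0_SEL_WRITE exit.
 */
static inline bool example_cr0_sel_write_triggers(unsigned long old_cr0,
						  unsigned long new_cr0)
{
	old_cr0 &= ~SVM_CR0_SELECTIVE_MASK;	/* ignore TS/MP changes */
	new_cr0 &= ~SVM_CR0_SELECTIVE_MASK;

	/* toggling only CR0.TS returns false; toggling e.g. CR0.CD returns true */
	return old_cr0 != new_cr0;
}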
3274
Andre Przywara7ff76d52010-12-21 11:12:04 +01003275#define CR_VALID (1ULL << 63)
3276
3277static int cr_interception(struct vcpu_svm *svm)
3278{
3279 int reg, cr;
3280 unsigned long val;
3281 int err;
3282
3283 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3284 return emulate_on_interception(svm);
3285
3286 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3287 return emulate_on_interception(svm);
3288
3289 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06003290 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3291 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3292 else
3293 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01003294
3295 err = 0;
3296 if (cr >= 16) { /* mov to cr */
3297 cr -= 16;
3298 val = kvm_register_read(&svm->vcpu, reg);
3299 switch (cr) {
3300 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02003301 if (!check_selective_cr0_intercepted(svm, val))
3302 err = kvm_set_cr0(&svm->vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02003303 else
3304 return 1;
3305
Andre Przywara7ff76d52010-12-21 11:12:04 +01003306 break;
3307 case 3:
3308 err = kvm_set_cr3(&svm->vcpu, val);
3309 break;
3310 case 4:
3311 err = kvm_set_cr4(&svm->vcpu, val);
3312 break;
3313 case 8:
3314 err = kvm_set_cr8(&svm->vcpu, val);
3315 break;
3316 default:
3317 WARN(1, "unhandled write to CR%d", cr);
3318 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3319 return 1;
3320 }
3321 } else { /* mov from cr */
3322 switch (cr) {
3323 case 0:
3324 val = kvm_read_cr0(&svm->vcpu);
3325 break;
3326 case 2:
3327 val = svm->vcpu.arch.cr2;
3328 break;
3329 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02003330 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003331 break;
3332 case 4:
3333 val = kvm_read_cr4(&svm->vcpu);
3334 break;
3335 case 8:
3336 val = kvm_get_cr8(&svm->vcpu);
3337 break;
3338 default:
3339 WARN(1, "unhandled read from CR%d", cr);
3340 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3341 return 1;
3342 }
3343 kvm_register_write(&svm->vcpu, reg, val);
3344 }
Kyle Huey6affcbe2016-11-29 12:40:40 -08003345 return kvm_complete_insn_gp(&svm->vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003346}
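/*
 * Worked example, not part of the driver: with decode assists, an exit
 * code of SVM_EXIT_WRITE_CR4 (0x014 in <asm/svm.h>) gives
 * cr = 0x014 - SVM_EXIT_READ_CR0 = 20, i.e. a write (cr >= 16) to CR4,
 * and exit_info_1 & SVM_EXITINFO_REG_MASK = 3 selects RBX as the source
 * register, so the intercepted instruction was "mov %rbx, %cr4".
 */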
3347
Andre Przywaracae37972010-12-21 11:12:05 +01003348static int dr_interception(struct vcpu_svm *svm)
3349{
3350 int reg, dr;
3351 unsigned long val;
Andre Przywaracae37972010-12-21 11:12:05 +01003352
Paolo Bonzinifacb0132014-02-21 10:32:27 +01003353 if (svm->vcpu.guest_debug == 0) {
3354 /*
3355 * No more DR vmexits; force a reload of the debug registers
3356 * and reenter on this instruction. The next vmexit will
3357 * retrieve the full state of the debug registers.
3358 */
3359 clr_dr_intercepts(svm);
3360 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
3361 return 1;
3362 }
3363
Andre Przywaracae37972010-12-21 11:12:05 +01003364 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3365 return emulate_on_interception(svm);
3366
3367 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3368 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3369
3370 if (dr >= 16) { /* mov to DRn */
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003371 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3372 return 1;
Andre Przywaracae37972010-12-21 11:12:05 +01003373 val = kvm_register_read(&svm->vcpu, reg);
3374 kvm_set_dr(&svm->vcpu, dr - 16, val);
3375 } else {
Nadav Amit16f8a6f2014-10-03 01:10:05 +03003376 if (!kvm_require_dr(&svm->vcpu, dr))
3377 return 1;
3378 kvm_get_dr(&svm->vcpu, dr, &val);
3379 kvm_register_write(&svm->vcpu, reg, val);
Andre Przywaracae37972010-12-21 11:12:05 +01003380 }
3381
Joerg Roedel2c46d2a2011-02-09 18:29:39 +01003382 skip_emulated_instruction(&svm->vcpu);
3383
Andre Przywaracae37972010-12-21 11:12:05 +01003384 return 1;
3385}
3386
Avi Kivity851ba692009-08-24 11:10:17 +03003387static int cr8_write_interception(struct vcpu_svm *svm)
Joerg Roedel1d075432007-12-06 21:02:25 +01003388{
Avi Kivity851ba692009-08-24 11:10:17 +03003389 struct kvm_run *kvm_run = svm->vcpu.run;
Andre Przywaraeea1cff2010-12-21 11:12:00 +01003390 int r;
Avi Kivity851ba692009-08-24 11:10:17 +03003391
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003392 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3393 /* instruction emulation calls kvm_set_cr8() */
Andre Przywara7ff76d52010-12-21 11:12:04 +01003394 r = cr_interception(svm);
Paolo Bonzini35754c92015-07-29 12:05:37 +02003395 if (lapic_in_kernel(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003396 return r;
Gleb Natapov0a5fff192009-04-21 17:45:06 +03003397 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
Andre Przywara7ff76d52010-12-21 11:12:04 +01003398 return r;
Joerg Roedel1d075432007-12-06 21:02:25 +01003399 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3400 return 0;
3401}
3402
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003403static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003404{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003405 struct vcpu_svm *svm = to_svm(vcpu);
3406
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003407 switch (msr_info->index) {
Jaswinder Singh Rajputaf24a4e2009-05-15 18:42:05 +05303408 case MSR_IA32_TSC: {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003409 msr_info->data = svm->vmcb->control.tsc_offset +
Haozhong Zhang35181e82015-10-20 15:39:03 +08003410 kvm_scale_tsc(vcpu, rdtsc());
Joerg Roedelfbc0db72011-03-25 09:44:46 +01003411
Avi Kivity6aa8b732006-12-10 02:21:36 -08003412 break;
3413 }
Brian Gerst8c065852010-07-17 09:03:26 -04003414 case MSR_STAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003415 msr_info->data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003416 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08003417#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003418 case MSR_LSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003419 msr_info->data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003420 break;
3421 case MSR_CSTAR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003422 msr_info->data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003423 break;
3424 case MSR_KERNEL_GS_BASE:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003425 msr_info->data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003426 break;
3427 case MSR_SYSCALL_MASK:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003428 msr_info->data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003429 break;
3430#endif
3431 case MSR_IA32_SYSENTER_CS:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003432 msr_info->data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003433 break;
3434 case MSR_IA32_SYSENTER_EIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003435 msr_info->data = svm->sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003436 break;
3437 case MSR_IA32_SYSENTER_ESP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003438 msr_info->data = svm->sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003439 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003440 case MSR_TSC_AUX:
3441 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3442 return 1;
3443 msr_info->data = svm->tsc_aux;
3444 break;
Joerg Roedele0231712010-02-24 18:59:10 +01003445 /*
3446 * Nobody will change the following 5 values in the VMCB so we can
3447 * safely return them on rdmsr. They will always be 0 until LBRV is
3448 * implemented.
3449 */
Joerg Roedela2938c82008-02-13 16:30:28 +01003450 case MSR_IA32_DEBUGCTLMSR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003451 msr_info->data = svm->vmcb->save.dbgctl;
Joerg Roedela2938c82008-02-13 16:30:28 +01003452 break;
3453 case MSR_IA32_LASTBRANCHFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003454 msr_info->data = svm->vmcb->save.br_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003455 break;
3456 case MSR_IA32_LASTBRANCHTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003457 msr_info->data = svm->vmcb->save.br_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003458 break;
3459 case MSR_IA32_LASTINTFROMIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003460 msr_info->data = svm->vmcb->save.last_excp_from;
Joerg Roedela2938c82008-02-13 16:30:28 +01003461 break;
3462 case MSR_IA32_LASTINTTOIP:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003463 msr_info->data = svm->vmcb->save.last_excp_to;
Joerg Roedela2938c82008-02-13 16:30:28 +01003464 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003465 case MSR_VM_HSAVE_PA:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003466 msr_info->data = svm->nested.hsave_msr;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003467 break;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003468 case MSR_VM_CR:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003469 msr_info->data = svm->nested.vm_cr_msr;
Joerg Roedeleb6f3022008-11-25 20:17:09 +01003470 break;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003471 case MSR_IA32_UCODE_REV:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003472 msr_info->data = 0x01000065;
Alexander Grafc8a73f12009-01-05 16:02:47 +01003473 break;
Borislav Petkovae8b7872015-11-23 11:12:23 +01003474 case MSR_F15H_IC_CFG: {
3475
3476 int family, model;
3477
3478 family = guest_cpuid_family(vcpu);
3479 model = guest_cpuid_model(vcpu);
3480
3481 if (family < 0 || model < 0)
3482 return kvm_get_msr_common(vcpu, msr_info);
3483
3484 msr_info->data = 0;
3485
3486 if (family == 0x15 &&
3487 (model >= 0x2 && model < 0x20))
3488 msr_info->data = 0x1E;
3489 }
3490 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003491 default:
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003492 return kvm_get_msr_common(vcpu, msr_info);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003493 }
3494 return 0;
3495}
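/*
 * Illustrative note, not part of the driver: the MSR_IA32_TSC read above
 * computes the guest's view of the TSC as
 *
 *	guest_tsc = kvm_scale_tsc(vcpu, rdtsc()) + vmcb->control.tsc_offset
 *
 * i.e. the scaled host TSC plus the offset that hardware applies on
 * guest RDTSC.
 */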
3496
Avi Kivity851ba692009-08-24 11:10:17 +03003497static int rdmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003498{
David Kaplan668f1982015-02-20 16:02:10 -06003499 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003500 struct msr_data msr_info;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003501
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003502 msr_info.index = ecx;
3503 msr_info.host_initiated = false;
3504 if (svm_get_msr(&svm->vcpu, &msr_info)) {
Avi Kivity59200272010-01-25 19:47:02 +02003505 trace_kvm_msr_read_ex(ecx);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003506 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity59200272010-01-25 19:47:02 +02003507 } else {
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003508 trace_kvm_msr_read(ecx, msr_info.data);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003509
Paolo Bonzini609e36d2015-04-08 15:30:38 +02003510 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3511 msr_info.data & 0xffffffff);
3512 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3513 msr_info.data >> 32);
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003514 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10003515 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003516 }
3517 return 1;
3518}
3519
Joerg Roedel4a810182010-02-24 18:59:15 +01003520static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3521{
3522 struct vcpu_svm *svm = to_svm(vcpu);
3523 int svm_dis, chg_mask;
3524
3525 if (data & ~SVM_VM_CR_VALID_MASK)
3526 return 1;
3527
3528 chg_mask = SVM_VM_CR_VALID_MASK;
3529
3530 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3531 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3532
3533 svm->nested.vm_cr_msr &= ~chg_mask;
3534 svm->nested.vm_cr_msr |= (data & chg_mask);
3535
3536 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3537
3538 /* check for svm_disable while efer.svme is set */
3539 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3540 return 1;
3541
3542 return 0;
3543}
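/*
 * Illustrative sketch, not part of the driver: the sticky-bit rule in
 * svm_set_vm_cr() above. Once SVM_VM_CR_SVM_DIS_MASK is set, both the
 * LOCK and DIS bits drop out of the writable mask, so the guest cannot
 * clear them again.
 */
static inline int example_vm_cr_writable_mask(u64 cur_vm_cr)
{
	int chg_mask = SVM_VM_CR_VALID_MASK;

	if (cur_vm_cr & SVM_VM_CR_SVM_DIS_MASK)	/* disable is sticky */
		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	return chg_mask;
}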
3544
Will Auld8fe8ab42012-11-29 12:42:12 -08003545static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003546{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003547 struct vcpu_svm *svm = to_svm(vcpu);
3548
Will Auld8fe8ab42012-11-29 12:42:12 -08003549 u32 ecx = msr->index;
3550 u64 data = msr->data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003551 switch (ecx) {
Zachary Amsdenf4e1b3c2010-08-19 22:07:16 -10003552 case MSR_IA32_TSC:
Will Auld8fe8ab42012-11-29 12:42:12 -08003553 kvm_write_tsc(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003554 break;
Brian Gerst8c065852010-07-17 09:03:26 -04003555 case MSR_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003556 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003557 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08003558#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08003559 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003560 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003561 break;
3562 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003563 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003564 break;
3565 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003566 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003567 break;
3568 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003569 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003570 break;
3571#endif
3572 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003573 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003574 break;
3575 case MSR_IA32_SYSENTER_EIP:
Andre Przywara017cb992009-05-28 11:56:31 +02003576 svm->sysenter_eip = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003577 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003578 break;
3579 case MSR_IA32_SYSENTER_ESP:
Andre Przywara017cb992009-05-28 11:56:31 +02003580 svm->sysenter_esp = data;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04003581 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003582 break;
Paolo Bonzini46896c72015-11-12 14:49:16 +01003583 case MSR_TSC_AUX:
3584 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3585 return 1;
3586
3587 /*
3588 * This is rare, so we update the MSR here instead of using
3589 * direct_access_msrs. Doing that would require a rdmsr in
3590 * svm_vcpu_put.
3591 */
3592 svm->tsc_aux = data;
3593 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
3594 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01003595 case MSR_IA32_DEBUGCTLMSR:
Avi Kivity2a6b20b2010-11-09 16:15:42 +02003596 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
Christoffer Dalla737f252012-06-03 21:17:48 +03003597 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3598 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003599 break;
3600 }
3601 if (data & DEBUGCTL_RESERVED_BITS)
3602 return 1;
3603
3604 svm->vmcb->save.dbgctl = data;
Joerg Roedelb53ba3f2010-12-03 11:45:59 +01003605 mark_dirty(svm->vmcb, VMCB_LBR);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01003606 if (data & (1ULL<<0))
3607 svm_enable_lbrv(svm);
3608 else
3609 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01003610 break;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003611 case MSR_VM_HSAVE_PA:
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003612 svm->nested.hsave_msr = data;
Alexander Grafb286d5d2008-11-25 20:17:05 +01003613 break;
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003614 case MSR_VM_CR:
Joerg Roedel4a810182010-02-24 18:59:15 +01003615 return svm_set_vm_cr(vcpu, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003616 case MSR_VM_IGNNE:
Christoffer Dalla737f252012-06-03 21:17:48 +03003617 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
Alexander Graf3c5d0a42009-06-15 15:21:23 +02003618 break;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05003619 case MSR_IA32_APICBASE:
3620 if (kvm_vcpu_apicv_active(vcpu))
3621 avic_update_vapic_bar(to_svm(vcpu), data);
 3622		/* Fall through */
Avi Kivity6aa8b732006-12-10 02:21:36 -08003623 default:
Will Auld8fe8ab42012-11-29 12:42:12 -08003624 return kvm_set_msr_common(vcpu, msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003625 }
3626 return 0;
3627}
3628
Avi Kivity851ba692009-08-24 11:10:17 +03003629static int wrmsr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003630{
Will Auld8fe8ab42012-11-29 12:42:12 -08003631 struct msr_data msr;
David Kaplan668f1982015-02-20 16:02:10 -06003632 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3633 u64 data = kvm_read_edx_eax(&svm->vcpu);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003634
Will Auld8fe8ab42012-11-29 12:42:12 -08003635 msr.data = data;
3636 msr.index = ecx;
3637 msr.host_initiated = false;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02003638
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003639 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Nadav Amit854e8bb2014-09-16 03:24:05 +03003640 if (kvm_set_msr(&svm->vcpu, &msr)) {
Avi Kivity59200272010-01-25 19:47:02 +02003641 trace_kvm_msr_write_ex(ecx, data);
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02003642 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity59200272010-01-25 19:47:02 +02003643 } else {
3644 trace_kvm_msr_write(ecx, data);
Rusty Russelle756fc62007-07-30 20:07:08 +10003645 skip_emulated_instruction(&svm->vcpu);
Avi Kivity59200272010-01-25 19:47:02 +02003646 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08003647 return 1;
3648}
3649
Avi Kivity851ba692009-08-24 11:10:17 +03003650static int msr_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003651{
Rusty Russelle756fc62007-07-30 20:07:08 +10003652 if (svm->vmcb->control.exit_info_1)
Avi Kivity851ba692009-08-24 11:10:17 +03003653 return wrmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003654 else
Avi Kivity851ba692009-08-24 11:10:17 +03003655 return rdmsr_interception(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003656}
3657
Avi Kivity851ba692009-08-24 11:10:17 +03003658static int interrupt_window_interception(struct vcpu_svm *svm)
Dor Laorc1150d82007-01-05 16:36:24 -08003659{
Avi Kivity3842d132010-07-27 12:30:24 +03003660 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graff0b85052008-11-25 20:17:01 +01003661 svm_clear_vintr(svm);
Eddie Dong85f455f2007-07-06 12:20:49 +03003662 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003663 mark_dirty(svm->vmcb, VMCB_INTR);
Jason Wang675acb72012-03-08 18:07:56 +08003664 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08003665 return 1;
3666}
3667
Mark Langsdorf565d0992009-10-06 14:25:02 -05003668static int pause_interception(struct vcpu_svm *svm)
3669{
3670 kvm_vcpu_on_spin(&(svm->vcpu));
3671 return 1;
3672}
3673
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04003674static int nop_interception(struct vcpu_svm *svm)
3675{
3676 skip_emulated_instruction(&(svm->vcpu));
3677 return 1;
3678}
3679
3680static int monitor_interception(struct vcpu_svm *svm)
3681{
3682 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
3683 return nop_interception(svm);
3684}
3685
3686static int mwait_interception(struct vcpu_svm *svm)
3687{
3688 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
3689 return nop_interception(svm);
3690}
3691
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05003692enum avic_ipi_failure_cause {
3693 AVIC_IPI_FAILURE_INVALID_INT_TYPE,
3694 AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
3695 AVIC_IPI_FAILURE_INVALID_TARGET,
3696 AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
3697};
3698
3699static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
3700{
3701 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
3702 u32 icrl = svm->vmcb->control.exit_info_1;
3703 u32 id = svm->vmcb->control.exit_info_2 >> 32;
Dan Carpenter5446a972016-05-23 13:20:10 +03003704 u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05003705 struct kvm_lapic *apic = svm->vcpu.arch.apic;
3706
3707 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
3708
3709 switch (id) {
3710 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
3711 /*
3712 * AVIC hardware handles the generation of
3713 * IPIs when the specified Message Type is Fixed
3714 * (also known as fixed delivery mode) and
3715 * the Trigger Mode is edge-triggered. The hardware
3716 * also supports self and broadcast delivery modes
 3717		 * specified via the Destination Shorthand (DSH)
 3718		 * field of the ICRL. Logical and physical APIC ID
 3719		 * formats are supported. All other IPI types cause
 3720		 * a #VMEXIT, which needs to be emulated.
3721 */
3722 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
3723 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
3724 break;
3725 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
3726 int i;
3727 struct kvm_vcpu *vcpu;
3728 struct kvm *kvm = svm->vcpu.kvm;
3729 struct kvm_lapic *apic = svm->vcpu.arch.apic;
3730
3731 /*
3732 * At this point, we expect that the AVIC HW has already
3733 * set the appropriate IRR bits on the valid target
3734 * vcpus. So, we just need to kick the appropriate vcpu.
3735 */
3736 kvm_for_each_vcpu(i, vcpu, kvm) {
3737 bool m = kvm_apic_match_dest(vcpu, apic,
3738 icrl & KVM_APIC_SHORT_MASK,
3739 GET_APIC_DEST_FIELD(icrh),
3740 icrl & KVM_APIC_DEST_MASK);
3741
3742 if (m && !avic_vcpu_is_running(vcpu))
3743 kvm_vcpu_wake_up(vcpu);
3744 }
3745 break;
3746 }
3747 case AVIC_IPI_FAILURE_INVALID_TARGET:
3748 break;
3749 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
3750 WARN_ONCE(1, "Invalid backing page\n");
3751 break;
3752 default:
3753 pr_err("Unknown IPI interception\n");
3754 }
3755
3756 return 1;
3757}
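/*
 * Illustrative sketch, not part of the driver: how the fields used by
 * avic_incomplete_ipi_interception() are unpacked from the AVIC #VMEXIT
 * information, mirroring the decode at the top of that function.
 */
static inline void example_avic_ipi_decode(u64 exit_info_1, u64 exit_info_2)
{
	u32 icrh = exit_info_1 >> 32;	/* ICR high half: destination field */
	u32 icrl = exit_info_1;		/* ICR low half: vector, DSH, ...   */
	u32 id = exit_info_2 >> 32;	/* failure cause (see the enum above) */
	u32 index = exit_info_2 & 0xFF;	/* index reported by hardware */

	(void)icrh; (void)icrl; (void)id; (void)index;
}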
3758
3759static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
3760{
3761 struct kvm_arch *vm_data = &vcpu->kvm->arch;
3762 int index;
3763 u32 *logical_apic_id_table;
3764 int dlid = GET_APIC_LOGICAL_ID(ldr);
3765
3766 if (!dlid)
3767 return NULL;
3768
3769 if (flat) { /* flat */
3770 index = ffs(dlid) - 1;
3771 if (index > 7)
3772 return NULL;
3773 } else { /* cluster */
3774 int cluster = (dlid & 0xf0) >> 4;
3775 int apic = ffs(dlid & 0x0f) - 1;
3776
3777 if ((apic < 0) || (apic > 7) ||
3778 (cluster >= 0xf))
3779 return NULL;
3780 index = (cluster << 2) + apic;
3781 }
3782
3783 logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
3784
3785 return &logical_apic_id_table[index];
3786}
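/*
 * Worked example, not part of the driver: the index computation in
 * avic_get_logical_id_entry(). In flat mode a logical ID of 0x20
 * (bit 5 set) maps to index ffs(0x20) - 1 = 5. In cluster mode a
 * logical ID of 0x42 splits into cluster 4 and apic ffs(0x2) - 1 = 1,
 * giving index (4 << 2) + 1 = 17.
 */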
3787
3788static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
3789 bool valid)
3790{
3791 bool flat;
3792 u32 *entry, new_entry;
3793
3794 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
3795 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
3796 if (!entry)
3797 return -EINVAL;
3798
3799 new_entry = READ_ONCE(*entry);
3800 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
3801 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
3802 if (valid)
3803 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
3804 else
3805 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
3806 WRITE_ONCE(*entry, new_entry);
3807
3808 return 0;
3809}
3810
3811static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
3812{
3813 int ret;
3814 struct vcpu_svm *svm = to_svm(vcpu);
3815 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
3816
3817 if (!ldr)
3818 return 1;
3819
3820 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
3821 if (ret && svm->ldr_reg) {
3822 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
3823 svm->ldr_reg = 0;
3824 } else {
3825 svm->ldr_reg = ldr;
3826 }
3827 return ret;
3828}
3829
3830static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
3831{
3832 u64 *old, *new;
3833 struct vcpu_svm *svm = to_svm(vcpu);
3834 u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
3835 u32 id = (apic_id_reg >> 24) & 0xff;
3836
3837 if (vcpu->vcpu_id == id)
3838 return 0;
3839
3840 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
3841 new = avic_get_physical_id_entry(vcpu, id);
3842 if (!new || !old)
3843 return 1;
3844
 3845	/* We need to move physical_id_entry to the new offset */
3846 *new = *old;
3847 *old = 0ULL;
3848 to_svm(vcpu)->avic_physical_id_cache = new;
3849
3850 /*
3851 * Also update the guest physical APIC ID in the logical
 3852	 * APIC ID table entry if the LDR has already been set up.
3853 */
3854 if (svm->ldr_reg)
3855 avic_handle_ldr_update(vcpu);
3856
3857 return 0;
3858}
3859
3860static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
3861{
3862 struct vcpu_svm *svm = to_svm(vcpu);
3863 struct kvm_arch *vm_data = &vcpu->kvm->arch;
3864 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
3865 u32 mod = (dfr >> 28) & 0xf;
3866
3867 /*
3868 * We assume that all local APICs are using the same type.
3869 * If this changes, we need to flush the AVIC logical
 3870	 * APIC ID table.
3871 */
3872 if (vm_data->ldr_mode == mod)
3873 return 0;
3874
3875 clear_page(page_address(vm_data->avic_logical_id_table_page));
3876 vm_data->ldr_mode = mod;
3877
3878 if (svm->ldr_reg)
3879 avic_handle_ldr_update(vcpu);
3880 return 0;
3881}
3882
3883static int avic_unaccel_trap_write(struct vcpu_svm *svm)
3884{
3885 struct kvm_lapic *apic = svm->vcpu.arch.apic;
3886 u32 offset = svm->vmcb->control.exit_info_1 &
3887 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
3888
3889 switch (offset) {
3890 case APIC_ID:
3891 if (avic_handle_apic_id_update(&svm->vcpu))
3892 return 0;
3893 break;
3894 case APIC_LDR:
3895 if (avic_handle_ldr_update(&svm->vcpu))
3896 return 0;
3897 break;
3898 case APIC_DFR:
3899 avic_handle_dfr_update(&svm->vcpu);
3900 break;
3901 default:
3902 break;
3903 }
3904
3905 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
3906
3907 return 1;
3908}
3909
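/*
 * AVIC reports unaccelerated APIC accesses as either traps or faults.
 * For the trap registers listed below the hardware has already
 * completed the write, so avic_unaccel_trap_write() above only mirrors
 * the new value into the in-kernel lapic; all other registers are
 * re-executed through the instruction emulator (the fault path).
 */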
3910static bool is_avic_unaccelerated_access_trap(u32 offset)
3911{
3912 bool ret = false;
3913
3914 switch (offset) {
3915 case APIC_ID:
3916 case APIC_EOI:
3917 case APIC_RRR:
3918 case APIC_LDR:
3919 case APIC_DFR:
3920 case APIC_SPIV:
3921 case APIC_ESR:
3922 case APIC_ICR:
3923 case APIC_LVTT:
3924 case APIC_LVTTHMR:
3925 case APIC_LVTPC:
3926 case APIC_LVT0:
3927 case APIC_LVT1:
3928 case APIC_LVTERR:
3929 case APIC_TMICT:
3930 case APIC_TDCR:
3931 ret = true;
3932 break;
3933 default:
3934 break;
3935 }
3936 return ret;
3937}
3938
3939static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
3940{
3941 int ret = 0;
3942 u32 offset = svm->vmcb->control.exit_info_1 &
3943 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
3944 u32 vector = svm->vmcb->control.exit_info_2 &
3945 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
3946 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
3947 AVIC_UNACCEL_ACCESS_WRITE_MASK;
3948 bool trap = is_avic_unaccelerated_access_trap(offset);
3949
3950 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
3951 trap, write, vector);
3952 if (trap) {
3953 /* Handling Trap */
3954 WARN_ONCE(!write, "svm: Handling trap read.\n");
3955 ret = avic_unaccel_trap_write(svm);
3956 } else {
3957 /* Handling Fault */
3958 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
3959 }
3960
3961 return ret;
3962}
3963
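/*
 * Dispatch table indexed by VMCB exit code: handle_exit() below bounds-
 * checks exit_code and calls svm_exit_handlers[exit_code](svm), so e.g.
 * SVM_EXIT_MSR lands in msr_interception().
 */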
Mathias Krause09941fb2012-08-30 01:30:20 +02003964static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
Andre Przywara7ff76d52010-12-21 11:12:04 +01003965 [SVM_EXIT_READ_CR0] = cr_interception,
3966 [SVM_EXIT_READ_CR3] = cr_interception,
3967 [SVM_EXIT_READ_CR4] = cr_interception,
3968 [SVM_EXIT_READ_CR8] = cr_interception,
David Kaplan5e575182015-03-06 14:44:35 -06003969 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
Joerg Roedel628afd22011-04-04 12:39:36 +02003970 [SVM_EXIT_WRITE_CR0] = cr_interception,
Andre Przywara7ff76d52010-12-21 11:12:04 +01003971 [SVM_EXIT_WRITE_CR3] = cr_interception,
3972 [SVM_EXIT_WRITE_CR4] = cr_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003973 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Andre Przywaracae37972010-12-21 11:12:05 +01003974 [SVM_EXIT_READ_DR0] = dr_interception,
3975 [SVM_EXIT_READ_DR1] = dr_interception,
3976 [SVM_EXIT_READ_DR2] = dr_interception,
3977 [SVM_EXIT_READ_DR3] = dr_interception,
3978 [SVM_EXIT_READ_DR4] = dr_interception,
3979 [SVM_EXIT_READ_DR5] = dr_interception,
3980 [SVM_EXIT_READ_DR6] = dr_interception,
3981 [SVM_EXIT_READ_DR7] = dr_interception,
3982 [SVM_EXIT_WRITE_DR0] = dr_interception,
3983 [SVM_EXIT_WRITE_DR1] = dr_interception,
3984 [SVM_EXIT_WRITE_DR2] = dr_interception,
3985 [SVM_EXIT_WRITE_DR3] = dr_interception,
3986 [SVM_EXIT_WRITE_DR4] = dr_interception,
3987 [SVM_EXIT_WRITE_DR5] = dr_interception,
3988 [SVM_EXIT_WRITE_DR6] = dr_interception,
3989 [SVM_EXIT_WRITE_DR7] = dr_interception,
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003990 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3991 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05003992 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003993 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003994 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Eric Northup54a20552015-11-03 18:03:53 +01003995 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01003996 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02003997 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003998 [SVM_EXIT_SMI] = nop_on_interception,
3999 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08004000 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity332b56e2011-11-10 14:57:24 +02004001 [SVM_EXIT_RDPMC] = rdpmc_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004002 [SVM_EXIT_CPUID] = cpuid_interception,
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004003 [SVM_EXIT_IRET] = iret_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02004004 [SVM_EXIT_INVD] = emulate_on_interception,
Mark Langsdorf565d0992009-10-06 14:25:02 -05004005 [SVM_EXIT_PAUSE] = pause_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004006 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03004007 [SVM_EXIT_INVLPG] = invlpg_interception,
Alexander Grafff092382009-06-15 15:21:24 +02004008 [SVM_EXIT_INVLPGA] = invlpga_interception,
Joerg Roedele0231712010-02-24 18:59:10 +01004009 [SVM_EXIT_IOIO] = io_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004010 [SVM_EXIT_MSR] = msr_interception,
4011 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08004012 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Alexander Graf3d6368e2008-11-25 20:17:07 +01004013 [SVM_EXIT_VMRUN] = vmrun_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02004014 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Alexander Graf55426752008-11-25 20:17:06 +01004015 [SVM_EXIT_VMLOAD] = vmload_interception,
4016 [SVM_EXIT_VMSAVE] = vmsave_interception,
Alexander Graf1371d902008-11-25 20:17:04 +01004017 [SVM_EXIT_STGI] = stgi_interception,
4018 [SVM_EXIT_CLGI] = clgi_interception,
Joerg Roedel532a46b2009-10-09 16:08:32 +02004019 [SVM_EXIT_SKINIT] = skinit_interception,
David Kaplandab429a2015-03-02 13:43:37 -06004020 [SVM_EXIT_WBINVD] = wbinvd_interception,
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04004021 [SVM_EXIT_MONITOR] = monitor_interception,
4022 [SVM_EXIT_MWAIT] = mwait_interception,
Joerg Roedel81dd35d2010-12-07 17:15:06 +01004023 [SVM_EXIT_XSETBV] = xsetbv_interception,
Joerg Roedel709ddeb2008-02-07 13:47:45 +01004024 [SVM_EXIT_NPF] = pf_interception,
Paolo Bonzini64d60672015-05-07 11:36:11 +02004025 [SVM_EXIT_RSM] = emulate_on_interception,
Suravee Suthikulpanit18f40c52016-05-04 14:09:48 -05004026 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
4027 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004028};
4029
Joe Perchesae8cc052011-04-24 22:00:50 -07004030static void dump_vmcb(struct kvm_vcpu *vcpu)
Joerg Roedel3f10c842010-05-05 16:04:42 +02004031{
4032 struct vcpu_svm *svm = to_svm(vcpu);
4033 struct vmcb_control_area *control = &svm->vmcb->control;
4034 struct vmcb_save_area *save = &svm->vmcb->save;
4035
4036 pr_err("VMCB Control Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004037 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4038 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4039 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4040 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4041 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4042 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4043 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4044 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4045 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4046 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4047 pr_err("%-20s%d\n", "asid:", control->asid);
4048 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4049 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4050 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4051 pr_err("%-20s%08x\n", "int_state:", control->int_state);
4052 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4053 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4054 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4055 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4056 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4057 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4058 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004059 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
Joe Perchesae8cc052011-04-24 22:00:50 -07004060 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4061 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
4062 pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
4063 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004064 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4065 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4066 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004067 pr_err("VMCB State Save Area:\n");
Joe Perchesae8cc052011-04-24 22:00:50 -07004068 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4069 "es:",
4070 save->es.selector, save->es.attrib,
4071 save->es.limit, save->es.base);
4072 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4073 "cs:",
4074 save->cs.selector, save->cs.attrib,
4075 save->cs.limit, save->cs.base);
4076 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4077 "ss:",
4078 save->ss.selector, save->ss.attrib,
4079 save->ss.limit, save->ss.base);
4080 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4081 "ds:",
4082 save->ds.selector, save->ds.attrib,
4083 save->ds.limit, save->ds.base);
4084 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4085 "fs:",
4086 save->fs.selector, save->fs.attrib,
4087 save->fs.limit, save->fs.base);
4088 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4089 "gs:",
4090 save->gs.selector, save->gs.attrib,
4091 save->gs.limit, save->gs.base);
4092 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4093 "gdtr:",
4094 save->gdtr.selector, save->gdtr.attrib,
4095 save->gdtr.limit, save->gdtr.base);
4096 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4097 "ldtr:",
4098 save->ldtr.selector, save->ldtr.attrib,
4099 save->ldtr.limit, save->ldtr.base);
4100 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4101 "idtr:",
4102 save->idtr.selector, save->idtr.attrib,
4103 save->idtr.limit, save->idtr.base);
4104 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4105 "tr:",
4106 save->tr.selector, save->tr.attrib,
4107 save->tr.limit, save->tr.base);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004108 pr_err("cpl: %d efer: %016llx\n",
4109 save->cpl, save->efer);
Joe Perchesae8cc052011-04-24 22:00:50 -07004110 pr_err("%-15s %016llx %-13s %016llx\n",
4111 "cr0:", save->cr0, "cr2:", save->cr2);
4112 pr_err("%-15s %016llx %-13s %016llx\n",
4113 "cr3:", save->cr3, "cr4:", save->cr4);
4114 pr_err("%-15s %016llx %-13s %016llx\n",
4115 "dr6:", save->dr6, "dr7:", save->dr7);
4116 pr_err("%-15s %016llx %-13s %016llx\n",
4117 "rip:", save->rip, "rflags:", save->rflags);
4118 pr_err("%-15s %016llx %-13s %016llx\n",
4119 "rsp:", save->rsp, "rax:", save->rax);
4120 pr_err("%-15s %016llx %-13s %016llx\n",
4121 "star:", save->star, "lstar:", save->lstar);
4122 pr_err("%-15s %016llx %-13s %016llx\n",
4123 "cstar:", save->cstar, "sfmask:", save->sfmask);
4124 pr_err("%-15s %016llx %-13s %016llx\n",
4125 "kernel_gs_base:", save->kernel_gs_base,
4126 "sysenter_cs:", save->sysenter_cs);
4127 pr_err("%-15s %016llx %-13s %016llx\n",
4128 "sysenter_esp:", save->sysenter_esp,
4129 "sysenter_eip:", save->sysenter_eip);
4130 pr_err("%-15s %016llx %-13s %016llx\n",
4131 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4132 pr_err("%-15s %016llx %-13s %016llx\n",
4133 "br_from:", save->br_from, "br_to:", save->br_to);
4134 pr_err("%-15s %016llx %-13s %016llx\n",
4135 "excp_from:", save->last_excp_from,
4136 "excp_to:", save->last_excp_to);
Joerg Roedel3f10c842010-05-05 16:04:42 +02004137}
4138
Avi Kivity586f9602010-11-18 13:09:54 +02004139static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4140{
4141 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4142
4143 *info1 = control->exit_info_1;
4144 *info2 = control->exit_info_2;
4145}
4146
Avi Kivity851ba692009-08-24 11:10:17 +03004147static int handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004148{
Avi Kivity04d2cc72007-09-10 18:10:54 +03004149 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity851ba692009-08-24 11:10:17 +03004150 struct kvm_run *kvm_run = vcpu->run;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004151 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004152
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01004153 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4154
Tom Lendacky0f89b202016-12-14 14:59:23 -05004155 vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
4156
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004157 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
Joerg Roedel2be4fc72010-04-22 12:33:09 +02004158 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4159 if (npt_enabled)
4160 vcpu->arch.cr3 = svm->vmcb->save.cr3;
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02004161
Joerg Roedelcd3ff652009-10-09 16:08:26 +02004162 if (unlikely(svm->nested.exit_required)) {
4163 nested_svm_vmexit(svm);
4164 svm->nested.exit_required = false;
4165
4166 return 1;
4167 }
4168
Joerg Roedel20307532010-11-29 17:51:48 +01004169 if (is_guest_mode(vcpu)) {
Joerg Roedel410e4d52009-08-07 11:49:44 +02004170 int vmexit;
4171
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004172 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4173 svm->vmcb->control.exit_info_1,
4174 svm->vmcb->control.exit_info_2,
4175 svm->vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01004176 svm->vmcb->control.exit_int_info_err,
4177 KVM_ISA_SVM);
Joerg Roedeld8cabdd2009-10-09 16:08:28 +02004178
Joerg Roedel410e4d52009-08-07 11:49:44 +02004179 vmexit = nested_svm_exit_special(svm);
4180
4181 if (vmexit == NESTED_EXIT_CONTINUE)
4182 vmexit = nested_svm_exit_handled(svm);
4183
4184 if (vmexit == NESTED_EXIT_DONE)
Alexander Grafcf74a782008-11-25 20:17:08 +01004185 return 1;
Alexander Grafcf74a782008-11-25 20:17:08 +01004186 }
4187
Joerg Roedela5c38322009-08-07 11:49:32 +02004188 svm_complete_interrupts(svm);
4189
Avi Kivity04d2cc72007-09-10 18:10:54 +03004190 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4191 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4192 kvm_run->fail_entry.hardware_entry_failure_reason
4193 = svm->vmcb->control.exit_code;
Joerg Roedel3f10c842010-05-05 16:04:42 +02004194 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4195 dump_vmcb(vcpu);
Avi Kivity04d2cc72007-09-10 18:10:54 +03004196 return 0;
4197 }
4198
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004199 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01004200 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
Joerg Roedel55c5e462010-09-10 17:31:04 +02004201 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4202 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
Borislav Petkov6614c7d2013-04-26 00:22:01 +02004203 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
Avi Kivity6aa8b732006-12-10 02:21:36 -08004204 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08004205 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08004206 exit_code);
4207
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02004208 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08004209 || !svm_exit_handlers[exit_code]) {
Bandan Dasfaac2452015-03-16 17:18:25 -04004210 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
Michael S. Tsirkin2bc19dc2014-09-18 16:21:16 +03004211 kvm_queue_exception(vcpu, UD_VECTOR);
4212 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004213 }
4214
Avi Kivity851ba692009-08-24 11:10:17 +03004215 return svm_exit_handlers[exit_code](svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004216}
4217
4218static void reload_tss(struct kvm_vcpu *vcpu)
4219{
4220 int cpu = raw_smp_processor_id();
4221
Tejun Heo0fe1e002009-10-29 22:34:14 +09004222 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4223 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08004224 load_TR_desc();
4225}
4226
Rusty Russelle756fc62007-07-30 20:07:08 +10004227static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004228{
4229 int cpu = raw_smp_processor_id();
4230
Tejun Heo0fe1e002009-10-29 22:34:14 +09004231 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004232
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03004233 /* FIXME: handle wraparound of asid_generation */
Tejun Heo0fe1e002009-10-29 22:34:14 +09004234 if (svm->asid_generation != sd->asid_generation)
4235 new_asid(svm, sd);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004236}
4237
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004238static void svm_inject_nmi(struct kvm_vcpu *vcpu)
4239{
4240 struct vcpu_svm *svm = to_svm(vcpu);
4241
4242 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
4243 vcpu->arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004244 set_intercept(svm, INTERCEPT_IRET);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004245 ++vcpu->stat.nmi_injections;
4246}
Avi Kivity6aa8b732006-12-10 02:21:36 -08004247
Eddie Dong85f455f2007-07-06 12:20:49 +03004248static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004249{
4250 struct vmcb_control_area *control;
4251
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004252 /* The following fields are ignored when AVIC is enabled */
Rusty Russelle756fc62007-07-30 20:07:08 +10004253 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03004254 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004255 control->int_ctl &= ~V_INTR_PRIO_MASK;
4256 control->int_ctl |= V_IRQ_MASK |
4257 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
Joerg Roedeldecdbf62010-12-03 11:45:52 +01004258 mark_dirty(svm->vmcb, VMCB_INTR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004259}
4260
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004261static void svm_set_irq(struct kvm_vcpu *vcpu)
Eddie Dong2a8067f2007-08-06 16:29:07 +03004262{
4263 struct vcpu_svm *svm = to_svm(vcpu);
4264
Joerg Roedel2af91942009-08-07 11:49:28 +02004265 BUG_ON(!(gif_set(svm)));
Alexander Grafcf74a782008-11-25 20:17:08 +01004266
Gleb Natapov9fb2d2b2010-05-23 14:28:26 +03004267 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
4268 ++vcpu->stat.irq_injections;
4269
Alexander Graf219b65d2009-06-15 15:21:25 +02004270 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
4271 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
Eddie Dong2a8067f2007-08-06 16:29:07 +03004272}
4273
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004274static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
4275{
4276 return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
4277}
4278
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004279static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
4280{
4281 struct vcpu_svm *svm = to_svm(vcpu);
4282
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004283 if (svm_nested_virtualize_tpr(vcpu) ||
4284 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01004285 return;
4286
Radim Krčmář596f3142014-03-11 19:11:18 +01004287 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4288
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004289 if (irr == -1)
4290 return;
4291
4292 if (tpr >= irr)
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004293 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004294}
4295
Yang Zhang8d146952013-01-25 10:18:50 +08004296static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
4297{
4298 return;
4299}
4300
Andrey Smetanind62caab2015-11-10 15:36:33 +03004301static bool svm_get_enable_apicv(void)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004302{
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004303 return avic;
Yang Zhangc7c9c562013-01-25 10:18:51 +08004304}
4305
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004306static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
4307{
4308}
4309
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02004310static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004311{
4312}
4313
4314/* Note: Currently only used by Hyper-V. */
Andrey Smetanind62caab2015-11-10 15:36:33 +03004315static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
4316{
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05004317 struct vcpu_svm *svm = to_svm(vcpu);
4318 struct vmcb *vmcb = svm->vmcb;
4319
4320 if (!avic)
4321 return;
4322
4323 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
4324 mark_dirty(vmcb, VMCB_INTR);
Yang Zhangc7c9c562013-01-25 10:18:51 +08004325}
4326
Andrey Smetanin63086302015-11-10 15:36:32 +03004327static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
Yang Zhangc7c9c562013-01-25 10:18:51 +08004328{
4329 return;
4330}
4331
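/*
 * Delivery path implemented below: set the vector in the vAPIC backing
 * page's IRR, then either ring the target CPU's AVIC doorbell MSR so
 * hardware injects the interrupt into the running guest, or wake the
 * vcpu up if it is not running.
 */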
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004332static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4333{
4334 kvm_lapic_set_irr(vec, vcpu->arch.apic);
4335 smp_mb__after_atomic();
4336
4337 if (avic_vcpu_is_running(vcpu))
4338 wrmsrl(SVM_AVIC_DOORBELL,
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05004339 kvm_cpu_get_apicid(vcpu->cpu));
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004340 else
4341 kvm_vcpu_wake_up(vcpu);
4342}
4343
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05004344static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4345{
4346 unsigned long flags;
4347 struct amd_svm_iommu_ir *cur;
4348
4349 spin_lock_irqsave(&svm->ir_list_lock, flags);
4350 list_for_each_entry(cur, &svm->ir_list, node) {
4351 if (cur->data != pi->ir_data)
4352 continue;
4353 list_del(&cur->node);
4354 kfree(cur);
4355 break;
4356 }
4357 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4358}
4359
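/*
 * Bookkeeping sketch (summarizing the code below, not an addition to
 * the algorithm): each vcpu keeps the IOMMU remapping data
 * (pi->ir_data) of every interrupt posted to it on svm->ir_list, so
 * vcpu scheduling changes can update the IRTEs directly instead of
 * re-walking the irq routing table.
 */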
4360static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4361{
4362 int ret = 0;
4363 unsigned long flags;
4364 struct amd_svm_iommu_ir *ir;
4365
4366 /**
 4367	 * In some cases, the existing irte is updated and re-set,
 4368	 * so we need to check here if it's already been added
4369 * to the ir_list.
4370 */
4371 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
4372 struct kvm *kvm = svm->vcpu.kvm;
4373 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
4374 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
4375 struct vcpu_svm *prev_svm;
4376
4377 if (!prev_vcpu) {
4378 ret = -EINVAL;
4379 goto out;
4380 }
4381
4382 prev_svm = to_svm(prev_vcpu);
4383 svm_ir_list_del(prev_svm, pi);
4384 }
4385
4386 /**
 4387	 * Allocate a new amd_iommu_pi_data, which will get
 4388	 * added to the per-vcpu ir_list.
4389 */
4390 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
4391 if (!ir) {
4392 ret = -ENOMEM;
4393 goto out;
4394 }
4395 ir->data = pi->ir_data;
4396
4397 spin_lock_irqsave(&svm->ir_list_lock, flags);
4398 list_add(&ir->node, &svm->ir_list);
4399 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4400out:
4401 return ret;
4402}
4403
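/*
 * Illustrative example (not from this file): a user can pin IRQ N to a
 * single CPU with "echo <mask> > /proc/irq/N/smp_affinity", which
 * satisfies the single-destination requirement described in the note
 * below.
 */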
4404/**
4405 * Note:
4406 * The HW cannot support posting multicast/broadcast
4407 * interrupts to a vCPU. So, we still use legacy interrupt
 4408 * remapping for these kinds of interrupts.
4409 *
4410 * For lowest-priority interrupts, we only support
 4411 * those with a single CPU as the destination, e.g. the user
4412 * configures the interrupts via /proc/irq or uses
4413 * irqbalance to make the interrupts single-CPU.
4414 */
4415static int
4416get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
4417 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
4418{
4419 struct kvm_lapic_irq irq;
4420 struct kvm_vcpu *vcpu = NULL;
4421
4422 kvm_set_msi_irq(kvm, e, &irq);
4423
4424 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
4425 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
4426 __func__, irq.vector);
4427 return -1;
4428 }
4429
4430 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
4431 irq.vector);
4432 *svm = to_svm(vcpu);
4433 vcpu_info->pi_desc_addr = page_to_phys((*svm)->avic_backing_page);
4434 vcpu_info->vector = irq.vector;
4435
4436 return 0;
4437}
4438
4439/*
4440 * svm_update_pi_irte - set IRTE for Posted-Interrupts
4441 *
 4442 * @kvm: the kvm instance
4443 * @host_irq: host irq of the interrupt
4444 * @guest_irq: gsi of the interrupt
4445 * @set: set or unset PI
4446 * returns 0 on success, < 0 on failure
4447 */
4448static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4449 uint32_t guest_irq, bool set)
4450{
4451 struct kvm_kernel_irq_routing_entry *e;
4452 struct kvm_irq_routing_table *irq_rt;
4453 int idx, ret = -EINVAL;
4454
4455 if (!kvm_arch_has_assigned_device(kvm) ||
4456 !irq_remapping_cap(IRQ_POSTING_CAP))
4457 return 0;
4458
4459 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
4460 __func__, host_irq, guest_irq, set);
4461
4462 idx = srcu_read_lock(&kvm->irq_srcu);
4463 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4464 WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4465
4466 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4467 struct vcpu_data vcpu_info;
4468 struct vcpu_svm *svm = NULL;
4469
4470 if (e->type != KVM_IRQ_ROUTING_MSI)
4471 continue;
4472
4473 /**
 4474		 * Here, we set up with legacy mode in the following cases:
 4475		 * 1. When we cannot target the interrupt to a specific vcpu.
 4476		 * 2. When unsetting the posted interrupt.
 4477		 * 3. When APIC virtualization is disabled for the vcpu.
4478 */
4479 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
4480 kvm_vcpu_apicv_active(&svm->vcpu)) {
4481 struct amd_iommu_pi_data pi;
4482
4483 /* Try to enable guest_mode in IRTE */
4484 pi.base = page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK;
4485 pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
4486 svm->vcpu.vcpu_id);
4487 pi.is_guest_mode = true;
4488 pi.vcpu_data = &vcpu_info;
4489 ret = irq_set_vcpu_affinity(host_irq, &pi);
4490
4491 /**
 4492			 * Here, we have successfully set up vcpu affinity in
 4493			 * IOMMU guest mode. Now, we need to store the posted
 4494			 * interrupt information in a per-vcpu ir_list so that
 4495			 * we can reference it directly when we update the vcpu
 4496			 * scheduling information in the IOMMU irte.
4497 */
4498 if (!ret && pi.is_guest_mode)
4499 svm_ir_list_add(svm, &pi);
4500 } else {
4501 /* Use legacy mode in IRTE */
4502 struct amd_iommu_pi_data pi;
4503
4504 /**
4505 * Here, pi is used to:
4506 * - Tell IOMMU to use legacy mode for this interrupt.
4507 * - Retrieve ga_tag of prior interrupt remapping data.
4508 */
4509 pi.is_guest_mode = false;
4510 ret = irq_set_vcpu_affinity(host_irq, &pi);
4511
4512 /**
4513 * Check if the posted interrupt was previously
 4514			 * set up with guest_mode by checking if the ga_tag
4515 * was cached. If so, we need to clean up the per-vcpu
4516 * ir_list.
4517 */
4518 if (!ret && pi.prev_ga_tag) {
4519 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
4520 struct kvm_vcpu *vcpu;
4521
4522 vcpu = kvm_get_vcpu_by_id(kvm, id);
4523 if (vcpu)
4524 svm_ir_list_del(to_svm(vcpu), &pi);
4525 }
4526 }
4527
4528 if (!ret && svm) {
4529 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
4530 host_irq, e->gsi,
4531 vcpu_info.vector,
4532 vcpu_info.pi_desc_addr, set);
4533 }
4534
4535 if (ret < 0) {
4536 pr_err("%s: failed to update PI IRTE\n", __func__);
4537 goto out;
4538 }
4539 }
4540
4541 ret = 0;
4542out:
4543 srcu_read_unlock(&kvm->irq_srcu, idx);
4544 return ret;
4545}
4546
Gleb Natapov95ba8273132009-04-21 17:45:08 +03004547static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02004548{
4549 struct vcpu_svm *svm = to_svm(vcpu);
4550 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02004551 int ret;
4552 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
4553 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
4554 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
4555
4556 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02004557}
4558
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004559static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
4560{
4561 struct vcpu_svm *svm = to_svm(vcpu);
4562
4563 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
4564}
4565
4566static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4567{
4568 struct vcpu_svm *svm = to_svm(vcpu);
4569
4570 if (masked) {
4571 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004572 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004573 } else {
4574 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01004575 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01004576 }
4577}
4578
Gleb Natapov78646122009-03-23 12:12:11 +02004579static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
4580{
4581 struct vcpu_svm *svm = to_svm(vcpu);
4582 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02004583 int ret;
4584
4585 if (!gif_set(svm) ||
4586 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
4587 return 0;
4588
Avi Kivityf6e78472010-08-02 15:30:20 +03004589 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02004590
Joerg Roedel20307532010-11-29 17:51:48 +01004591 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02004592 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
4593
4594 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02004595}
4596
Jan Kiszkac9a79532014-03-07 20:03:15 +01004597static void enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03004598{
Alexander Graf219b65d2009-06-15 15:21:25 +02004599 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02004600
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05004601 if (kvm_vcpu_apicv_active(vcpu))
4602 return;
4603
Joerg Roedele0231712010-02-24 18:59:10 +01004604 /*
4605 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
4606 * 1, because that's a separate STGI/VMRUN intercept. The next time we
4607 * get that intercept, this function will be called again though and
4608 * we'll get the vintr intercept.
4609 */
Joerg Roedel8fe54652010-02-19 16:23:01 +01004610 if (gif_set(svm) && nested_svm_intr(svm)) {
Alexander Graf219b65d2009-06-15 15:21:25 +02004611 svm_set_vintr(svm);
4612 svm_inject_irq(svm, 0x0);
4613 }
Gleb Natapov9222be12009-04-23 17:14:37 +03004614}
4615
Jan Kiszkac9a79532014-03-07 20:03:15 +01004616static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004617{
Avi Kivity04d2cc72007-09-10 18:10:54 +03004618 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03004619
Gleb Natapov44c11432009-05-11 13:35:52 +03004620 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
4621 == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01004622 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03004623
Joerg Roedele0231712010-02-24 18:59:10 +01004624 /*
 4625	 * Something prevents NMI from being injected. Single-step over the
 4626	 * possible problem (IRET or exception injection or interrupt shadow).
4627 */
Jan Kiszka6be7d302009-10-18 13:24:54 +02004628 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03004629 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03004630}
4631
Izik Eiduscbc94022007-10-25 00:29:55 +02004632static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
4633{
4634 return 0;
4635}
4636
Avi Kivityd9e368d2007-06-07 19:18:30 +03004637static void svm_flush_tlb(struct kvm_vcpu *vcpu)
4638{
Joerg Roedel38e5e922010-12-03 15:25:16 +01004639 struct vcpu_svm *svm = to_svm(vcpu);
4640
4641 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
4642 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4643 else
4644 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03004645}
4646
Avi Kivity04d2cc72007-09-10 18:10:54 +03004647static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
4648{
4649}
4650
Joerg Roedeld7bf8222008-04-16 16:51:17 +02004651static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
4652{
4653 struct vcpu_svm *svm = to_svm(vcpu);
4654
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004655 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01004656 return;
4657
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01004658 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02004659 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03004660 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02004661 }
4662}
4663
Joerg Roedel649d6862008-04-16 16:51:15 +02004664static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
4665{
4666 struct vcpu_svm *svm = to_svm(vcpu);
4667 u64 cr8;
4668
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05004669 if (svm_nested_virtualize_tpr(vcpu) ||
4670 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01004671 return;
4672
Joerg Roedel649d6862008-04-16 16:51:15 +02004673 cr8 = kvm_get_cr8(vcpu);
4674 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
4675 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
4676}
4677
Gleb Natapov9222be12009-04-23 17:14:37 +03004678static void svm_complete_interrupts(struct vcpu_svm *svm)
4679{
4680 u8 vector;
4681 int type;
4682 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01004683 unsigned int3_injected = svm->int3_injected;
4684
4685 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03004686
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02004687 /*
4688 * If we've made progress since setting HF_IRET_MASK, we've
4689 * executed an IRET and can allow NMI injection.
4690 */
4691 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
4692 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03004693 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03004694 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4695 }
Gleb Natapov44c11432009-05-11 13:35:52 +03004696
Gleb Natapov9222be12009-04-23 17:14:37 +03004697 svm->vcpu.arch.nmi_injected = false;
4698 kvm_clear_exception_queue(&svm->vcpu);
4699 kvm_clear_interrupt_queue(&svm->vcpu);
4700
4701 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
4702 return;
4703
Avi Kivity3842d132010-07-27 12:30:24 +03004704 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4705
Gleb Natapov9222be12009-04-23 17:14:37 +03004706 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
4707 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
4708
4709 switch (type) {
4710 case SVM_EXITINTINFO_TYPE_NMI:
4711 svm->vcpu.arch.nmi_injected = true;
4712 break;
4713 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01004714 /*
4715 * In case of software exceptions, do not reinject the vector,
4716 * but re-execute the instruction instead. Rewind RIP first
4717 * if we emulated INT3 before.
4718 */
4719 if (kvm_exception_is_soft(vector)) {
4720 if (vector == BP_VECTOR && int3_injected &&
4721 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
4722 kvm_rip_write(&svm->vcpu,
4723 kvm_rip_read(&svm->vcpu) -
4724 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02004725 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01004726 }
Gleb Natapov9222be12009-04-23 17:14:37 +03004727 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
4728 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02004729 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03004730
4731 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02004732 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03004733 break;
4734 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03004735 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03004736 break;
4737 default:
4738 break;
4739 }
4740}
4741
Avi Kivityb463a6f2010-07-20 15:06:17 +03004742static void svm_cancel_injection(struct kvm_vcpu *vcpu)
4743{
4744 struct vcpu_svm *svm = to_svm(vcpu);
4745 struct vmcb_control_area *control = &svm->vmcb->control;
4746
4747 control->exit_int_info = control->event_inj;
4748 control->exit_int_info_err = control->event_inj_err;
4749 control->event_inj = 0;
4750 svm_complete_interrupts(svm);
4751}
4752
Avi Kivity851ba692009-08-24 11:10:17 +03004753static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08004754{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004755 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03004756
Joerg Roedel2041a062010-04-22 12:33:08 +02004757 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4758 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4759 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4760
Joerg Roedelcd3ff652009-10-09 16:08:26 +02004761 /*
4762 * A vmexit emulation is required before the vcpu can be executed
4763 * again.
4764 */
4765 if (unlikely(svm->nested.exit_required))
4766 return;
4767
Rusty Russelle756fc62007-07-30 20:07:08 +10004768 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004769
Joerg Roedel649d6862008-04-16 16:51:15 +02004770 sync_lapic_to_cr8(vcpu);
4771
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02004772 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08004773
Avi Kivity04d2cc72007-09-10 18:10:54 +03004774 clgi();
4775
4776 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08004777
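	/*
	 * Shape of the entry sequence below: guest GPRs are loaded from
	 * vcpu->arch.regs, %rax is pointed at the VMCB, VMLOAD pulls in
	 * the remaining guest state (FS/GS/TR/LDTR and the
	 * SYSCALL/SYSENTER MSRs), VMRUN executes the guest until the
	 * next #VMEXIT, and VMSAVE stashes that state back into the
	 * VMCB before the GPRs are written out again.
	 */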
Avi Kivity6aa8b732006-12-10 02:21:36 -08004778 asm volatile (
Avi Kivity74547662012-09-16 15:10:59 +03004779 "push %%" _ASM_BP "; \n\t"
4780 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
4781 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
4782 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
4783 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
4784 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
4785 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08004786#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10004787 "mov %c[r8](%[svm]), %%r8 \n\t"
4788 "mov %c[r9](%[svm]), %%r9 \n\t"
4789 "mov %c[r10](%[svm]), %%r10 \n\t"
4790 "mov %c[r11](%[svm]), %%r11 \n\t"
4791 "mov %c[r12](%[svm]), %%r12 \n\t"
4792 "mov %c[r13](%[svm]), %%r13 \n\t"
4793 "mov %c[r14](%[svm]), %%r14 \n\t"
4794 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08004795#endif
4796
Avi Kivity6aa8b732006-12-10 02:21:36 -08004797 /* Enter guest mode */
Avi Kivity74547662012-09-16 15:10:59 +03004798 "push %%" _ASM_AX " \n\t"
4799 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03004800 __ex(SVM_VMLOAD) "\n\t"
4801 __ex(SVM_VMRUN) "\n\t"
4802 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity74547662012-09-16 15:10:59 +03004803 "pop %%" _ASM_AX " \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08004804
4805 /* Save guest registers, load host registers */
Avi Kivity74547662012-09-16 15:10:59 +03004806 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
4807 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
4808 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
4809 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
4810 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
4811 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08004812#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10004813 "mov %%r8, %c[r8](%[svm]) \n\t"
4814 "mov %%r9, %c[r9](%[svm]) \n\t"
4815 "mov %%r10, %c[r10](%[svm]) \n\t"
4816 "mov %%r11, %c[r11](%[svm]) \n\t"
4817 "mov %%r12, %c[r12](%[svm]) \n\t"
4818 "mov %%r13, %c[r13](%[svm]) \n\t"
4819 "mov %%r14, %c[r14](%[svm]) \n\t"
4820 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08004821#endif
Avi Kivity74547662012-09-16 15:10:59 +03004822 "pop %%" _ASM_BP
Avi Kivity6aa8b732006-12-10 02:21:36 -08004823 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10004824 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08004825 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004826 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
4827 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
4828 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
4829 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
4830 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
4831 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08004832#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08004833 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
4834 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
4835 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
4836 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
4837 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
4838 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
4839 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
4840 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08004841#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02004842 : "cc", "memory"
4843#ifdef CONFIG_X86_64
Avi Kivity74547662012-09-16 15:10:59 +03004844 , "rbx", "rcx", "rdx", "rsi", "rdi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02004845 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
Avi Kivity74547662012-09-16 15:10:59 +03004846#else
4847 , "ebx", "ecx", "edx", "esi", "edi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02004848#endif
4849 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08004850
Avi Kivity82ca2d12010-10-21 12:20:34 +02004851#ifdef CONFIG_X86_64
4852 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
4853#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02004854 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02004855#ifndef CONFIG_X86_32_LAZY_GS
4856 loadsegment(gs, svm->host.gs);
4857#endif
Avi Kivity9581d442010-10-19 16:46:55 +02004858#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08004859
4860 reload_tss(vcpu);
4861
Avi Kivity56ba47d2007-11-07 17:14:18 +02004862 local_irq_disable();
4863
Avi Kivity13c34e02010-10-21 12:20:31 +02004864 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4865 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4866 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4867 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4868
Joerg Roedel3781c012011-01-14 16:45:02 +01004869 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4870 kvm_before_handle_nmi(&svm->vcpu);
4871
4872 stgi();
4873
4874 /* Any pending NMI will happen here */
4875
4876 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4877 kvm_after_handle_nmi(&svm->vcpu);
4878
Joerg Roedeld7bf8222008-04-16 16:51:17 +02004879 sync_cr8_to_lapic(vcpu);
4880
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004881 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03004882
Joerg Roedel38e5e922010-12-03 15:25:16 +01004883 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4884
Gleb Natapov631bc482010-10-14 11:22:52 +02004885 /* if exit due to PF check for async PF */
4886 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4887 svm->apf_reason = kvm_read_and_reset_pf_reason();
4888
Avi Kivity6de4f3a2009-05-31 22:58:47 +03004889 if (npt_enabled) {
4890 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
4891 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
4892 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02004893
4894 /*
4895 * We need to handle MC intercepts here before the vcpu has a chance to
4896 * change the physical cpu
4897 */
4898 if (unlikely(svm->vmcb->control.exit_code ==
4899 SVM_EXIT_EXCP_BASE + MC_VECTOR))
4900 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01004901
4902 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004903}
4904
Avi Kivity6aa8b732006-12-10 02:21:36 -08004905static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
4906{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04004907 struct vcpu_svm *svm = to_svm(vcpu);
4908
4909 svm->vmcb->save.cr3 = root;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01004910 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedelf40f6a42010-12-03 15:25:15 +01004911 svm_flush_tlb(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08004912}
4913
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004914static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
4915{
4916 struct vcpu_svm *svm = to_svm(vcpu);
4917
4918 svm->vmcb->control.nested_cr3 = root;
Joerg Roedelb2747162010-12-03 11:45:53 +01004919 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004920
4921 /* Also sync guest cr3 here in case we live migrate */
Avi Kivity9f8fe502010-12-05 17:30:00 +02004922 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01004923 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004924
Joerg Roedelf40f6a42010-12-03 15:25:15 +01004925 svm_flush_tlb(vcpu);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02004926}
4927
Avi Kivity6aa8b732006-12-10 02:21:36 -08004928static int is_disabled(void)
4929{
Joerg Roedel6031a612007-06-22 12:29:50 +03004930 u64 vm_cr;
4931
4932 rdmsrl(MSR_VM_CR, vm_cr);
4933 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
4934 return 1;
4935
Avi Kivity6aa8b732006-12-10 02:21:36 -08004936 return 0;
4937}
4938
Ingo Molnar102d8322007-02-19 14:37:47 +02004939static void
4940svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4941{
4942 /*
4943 * Patch in the VMMCALL instruction:
4944 */
4945 hypercall[0] = 0x0f;
4946 hypercall[1] = 0x01;
4947 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02004948}
4949
Yang, Sheng002c7f72007-07-31 14:23:01 +03004950static void svm_check_processor_compat(void *rtn)
4951{
4952 *(int *)rtn = 0;
4953}
4954
Avi Kivity774ead32007-12-26 13:57:04 +02004955static bool svm_cpu_has_accelerated_tpr(void)
4956{
4957 return false;
4958}
4959
Paolo Bonzini6d396b52015-04-01 14:25:33 +02004960static bool svm_has_high_real_mode_segbase(void)
4961{
4962 return true;
4963}
4964
Paolo Bonzinifc07e762015-10-01 13:20:22 +02004965static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4966{
4967 return 0;
4968}
4969
Sheng Yang0e851882009-12-18 16:48:46 +08004970static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4971{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02004972 struct vcpu_svm *svm = to_svm(vcpu);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05004973 struct kvm_cpuid_entry2 *entry;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02004974
4975 /* Update nrips enabled cache */
4976 svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05004977
4978 if (!kvm_vcpu_apicv_active(vcpu))
4979 return;
4980
4981 entry = kvm_find_cpuid_entry(vcpu, 1, 0);
4982 if (entry)
4983 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
Sheng Yang0e851882009-12-18 16:48:46 +08004984}
4985
Joerg Roedeld4330ef2010-04-22 12:33:11 +02004986static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
4987{
Joerg Roedelc2c63a42010-04-22 12:33:12 +02004988 switch (func) {
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05004989 case 0x1:
4990 if (avic)
4991 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
4992 break;
Joerg Roedel4c62a2d2010-09-10 17:31:06 +02004993 case 0x80000001:
4994 if (nested)
4995 entry->ecx |= (1 << 2); /* Set SVM bit */
4996 break;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02004997 case 0x8000000A:
4998 entry->eax = 1; /* SVM revision 1 */
 4999		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5000 ASID emulation to nested SVM */
5001 entry->ecx = 0; /* Reserved */
Joerg Roedel7a190662010-07-27 18:14:21 +02005002		entry->edx = 0; /* By default, do not support any
5003 additional features */
5004
5005 /* Support next_rip if host supports it */
Avi Kivity2a6b20b2010-11-09 16:15:42 +02005006 if (boot_cpu_has(X86_FEATURE_NRIPS))
Joerg Roedel7a190662010-07-27 18:14:21 +02005007 entry->edx |= SVM_FEATURE_NRIP;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005008
Joerg Roedel3d4aeaa2010-09-10 17:31:05 +02005009 /* Support NPT for the guest if enabled */
5010 if (npt_enabled)
5011 entry->edx |= SVM_FEATURE_NPT;
5012
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005013 break;
5014 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005015}
5016
Sheng Yang17cc3932010-01-05 19:02:27 +08005017static int svm_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +02005018{
Sheng Yang17cc3932010-01-05 19:02:27 +08005019 return PT_PDPE_LEVEL;
Joerg Roedel344f4142009-07-27 16:30:48 +02005020}
5021
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005022static bool svm_rdtscp_supported(void)
5023{
Paolo Bonzini46896c72015-11-12 14:49:16 +01005024 return boot_cpu_has(X86_FEATURE_RDTSCP);
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005025}
5026
Mao, Junjiead756a12012-07-02 01:18:48 +00005027static bool svm_invpcid_supported(void)
5028{
5029 return false;
5030}
5031
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01005032static bool svm_mpx_supported(void)
5033{
5034 return false;
5035}
5036
Wanpeng Li55412b22014-12-02 19:21:30 +08005037static bool svm_xsaves_supported(void)
5038{
5039 return false;
5040}
5041
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005042static bool svm_has_wbinvd_exit(void)
5043{
5044 return true;
5045}
5046
Joerg Roedel80612522011-04-04 12:39:33 +02005047#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005048 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005049#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005050 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005051#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005052 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005053
Mathias Krause09941fb2012-08-30 01:30:20 +02005054static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005055 u32 exit_code;
5056 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005057} x86_intercept_map[] = {
5058 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5059 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5060 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5061 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5062 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02005063 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5064 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02005065 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5066 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5067 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5068 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5069 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5070 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5071 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5072 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02005073 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5074 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5075 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5076 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5077 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5078 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5079 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5080 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005081 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5082 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5083 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02005084 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5085 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5086 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5087 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5088 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5089 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5090 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5091 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5092 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02005093 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5094 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5095 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5096 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5097 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5098 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5099 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02005100 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5101 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5102 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5103 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005104};
5105
Joerg Roedel80612522011-04-04 12:39:33 +02005106#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005107#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005108#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005109
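/*
 * Example lookup in the map above (sketch): x86_intercept_wrmsr maps to
 * POST_EX(SVM_EXIT_MSR), so svm_check_intercept() below matches the
 * X86_ICPT_POST_EXCEPT stage, fills in exit_info_1, and lets the nested
 * hypervisor's intercept bits decide via nested_svm_exit_handled().
 */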
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005110static int svm_check_intercept(struct kvm_vcpu *vcpu,
5111 struct x86_instruction_info *info,
5112 enum x86_intercept_stage stage)
5113{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005114 struct vcpu_svm *svm = to_svm(vcpu);
5115 int vmexit, ret = X86EMUL_CONTINUE;
5116 struct __x86_intercept icpt_info;
5117 struct vmcb *vmcb = svm->vmcb;
5118
5119 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5120 goto out;
5121
5122 icpt_info = x86_intercept_map[info->intercept];
5123
Avi Kivity40e19b52011-04-21 12:35:41 +03005124 if (stage != icpt_info.stage)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005125 goto out;
5126
5127 switch (icpt_info.exit_code) {
5128 case SVM_EXIT_READ_CR0:
5129 if (info->intercept == x86_intercept_cr_read)
5130 icpt_info.exit_code += info->modrm_reg;
5131 break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
		    info->intercept == x86_intercept_clts)
			break;

		intercept = svm->nested.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

		if (info->intercept == x86_intercept_lmsw) {
			cr0 &= 0xfUL;
			val &= 0xfUL;
			/* lmsw can't clear PE - catch this here */
			if (cr0 & X86_CR0_PE)
				val |= X86_CR0_PE;
		}

		if (cr0 ^ val)
			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

		break;
	}
	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:
		icpt_info.exit_code += info->modrm_reg;
		break;
	case SVM_EXIT_MSR:
		if (info->intercept == x86_intercept_wrmsr)
			vmcb->control.exit_info_1 = 1;
		else
			vmcb->control.exit_info_1 = 0;
		break;
	case SVM_EXIT_PAUSE:
		/*
		 * We only get this intercept for a plain NOP; PAUSE is
		 * encoded as REP NOP (F3 90), so check for the REP
		 * prefix here to tell the two apart.
		 */
		if (info->rep_prefix != REPE_PREFIX)
			goto out;
		break;
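	/*
	 * For I/O intercepts, exit_info_1 mirrors the encoding the CPU
	 * itself uses (AMD APM vol. 2, IOIO intercept): bit 0 =
	 * direction (1 = IN), bit 2 = string instruction, bit 3 = REP
	 * prefix, bits 6:4 = operand size in bytes, bits 9:7 = address
	 * size (ad_bytes of 2/4/8 shifted left by 6 sets the
	 * A16/A32/A64 bit), bits 31:16 = port number.  For example,
	 * "rep outsb" with a 32-bit address size becomes
	 * (port << 16) | STR | REP | (1 << SVM_IOIO_SIZE_SHIFT) | (4 << 6).
	 */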
	case SVM_EXIT_IOIO: {
		u64 exit_info;
		u32 bytes;

		if (info->intercept == x86_intercept_in ||
		    info->intercept == x86_intercept_ins) {
			exit_info = ((info->src_val & 0xffff) << 16) |
				SVM_IOIO_TYPE_MASK;
			bytes = info->dst_bytes;
		} else {
			exit_info = (info->dst_val & 0xffff) << 16;
			bytes = info->src_bytes;
		}

		if (info->intercept == x86_intercept_outs ||
		    info->intercept == x86_intercept_ins)
			exit_info |= SVM_IOIO_STR_MASK;

		if (info->rep_prefix)
			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

		break;
	}
	default:
		break;
	}

	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip = info->next_rip;
	vmcb->control.exit_code = icpt_info.exit_code;
	vmexit = nested_svm_exit_handled(svm);

	ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
					   : X86EMUL_CONTINUE;

out:
	return ret;
}

static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
	local_irq_enable();
	/*
	 * Execute at least one instruction with interrupts enabled, so
	 * that a pending timer interrupt is not delayed by the
	 * interrupt shadow.
	 */
	asm("nop");
	local_irq_disable();
}

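/*
 * SVM has no per-switch work to do when a vCPU is scheduled in; the
 * empty stub below exists only to fill the kvm_x86_ops->sched_in slot
 * that common KVM code invokes unconditionally.
 */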
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}

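/*
 * After guest APIC state has been restored (e.g. following migration or
 * a userspace KVM_SET_LAPIC), re-derive the AVIC physical and logical
 * APIC ID tables from the new APIC ID, DFR and LDR values; if either of
 * the first two updates fails, the remaining ones are skipped.
 */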
static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
{
	if (avic_handle_apic_id_update(vcpu) != 0)
		return;
	if (avic_handle_dfr_update(vcpu) != 0)
		return;
	avic_handle_ldr_update(vcpu);
}

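/*
 * The vendor dispatch table: arch-independent x86 KVM code never calls
 * the SVM implementation directly, it indirects through these hooks.
 * The table is registered with kvm_init() below and, being
 * __ro_after_init, is made read-only once module init completes.
 */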
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_init = avic_vm_init,
	.vm_destroy = avic_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_bp_intercept = update_bp_intercept,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr6 = svm_get_dr6,
	.set_dr6 = svm_set_dr6,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.get_pkru = svm_get_pkru,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
	.get_enable_apicv = svm_get_enable_apicv,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,
	.invpcid_supported = svm_invpcid_supported,
	.mpx_supported = svm_mpx_supported,
	.xsaves_supported = svm_xsaves_supported,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.write_tsc_offset = svm_write_tsc_offset,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
	.handle_external_intr = svm_handle_external_intr,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.update_pi_irte = svm_update_pi_irte,
};

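/*
 * Module entry point: hand the ops table to common KVM code, together
 * with the size and alignment of the vendor vcpu structure so that
 * common code can allocate one struct vcpu_svm per vCPU.
 */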
static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)