/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "cpuid.h"
#include "lapic.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
#include <linux/frame.h>
#include <linux/nospec.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/fpu/internal.h>
#include <asm/perf_event.h>
#include <asm/debugreg.h>
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include <asm/mmu_context.h>
#include <asm/spec-ctrl.h>
#include <asm/mshyperv.h>

#include "trace.h"
#include "pmu.h"
#include "vmx_evmcs.h"

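/*
 * A note on the wrappers below (descriptive summary, not original text):
 * they execute a VMX instruction with an exception fixup attached, so a
 * fault taken because VMX has already been turned off (e.g. during an
 * emergency reboot) is swallowed instead of oopsing the host. See
 * __kvm_handle_fault_on_reboot().
 */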
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
        ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id vmx_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_VMX),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

static bool __read_mostly nosmt;
module_param(nosmt, bool, S_IRUGO);

static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);

static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
                        enable_unrestricted_guest, bool, S_IRUGO);

static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

static bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be hypervisors for their own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

static u64 __read_mostly host_xss;

static bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

#define MSR_TYPE_R  1
#define MSR_TYPE_W  2
#define MSR_TYPE_RW 3

#define MSR_BITMAP_MODE_X2APIC       1
#define MSR_BITMAP_MODE_X2APIC_APICV 2
#define MSR_BITMAP_MODE_LM           4

#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division. */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON                            \
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
         X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS                                \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
         | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK          \
        (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
        VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
        VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
        VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. In testing, this time is usually less than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC;
 * see SDM volume 3B, sections 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
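
/*
 * Example of the dynamic window (an illustrative sketch; the grow/shrink
 * helpers appear later in this file): with the defaults above
 * (ple_window = 4096), each PAUSE-loop exit multiplies the per-vcpu window
 * by ple_window_grow,
 *
 *      4096 -> 8192 -> 16384 -> ... (clamped at ple_window_max),
 *
 * while shrinking resets it back to ple_window, per the comments above.
 */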

extern const ulong vmx_return;

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);

/* Storage for pre module init parameter parsing */
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;

static const struct {
        const char *option;
        enum vmx_l1d_flush_state cmd;
} vmentry_l1d_param[] = {
        {"auto",   VMENTER_L1D_FLUSH_AUTO},
        {"never",  VMENTER_L1D_FLUSH_NEVER},
        {"cond",   VMENTER_L1D_FLUSH_COND},
        {"always", VMENTER_L1D_FLUSH_ALWAYS},
};

#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
        struct page *page;

        /* If set to 'auto' select 'cond' */
        if (l1tf == VMENTER_L1D_FLUSH_AUTO)
                l1tf = VMENTER_L1D_FLUSH_COND;

        if (!enable_ept) {
                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
                return 0;
        }

        if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
            !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
                page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
                if (!page)
                        return -ENOMEM;
                vmx_l1d_flush_pages = page_address(page);
        }

        l1tf_vmx_mitigation = l1tf;

        if (l1tf != VMENTER_L1D_FLUSH_NEVER)
                static_branch_enable(&vmx_l1d_should_flush);
        else
                static_branch_disable(&vmx_l1d_should_flush);

        if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
                static_branch_enable(&vmx_l1d_flush_always);
        else
                static_branch_disable(&vmx_l1d_flush_always);
        return 0;
}

static int vmentry_l1d_flush_parse(const char *s)
{
        unsigned int i;

        if (s) {
                for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
                        if (sysfs_streq(s, vmentry_l1d_param[i].option))
                                return vmentry_l1d_param[i].cmd;
                }
        }
        return -EINVAL;
}

static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
        int l1tf, ret;

        if (!boot_cpu_has(X86_BUG_L1TF))
                return 0;

        l1tf = vmentry_l1d_flush_parse(s);
        if (l1tf < 0)
                return l1tf;

        /*
         * Has vmx_init() run already? If not then this is the pre init
         * parameter parsing. In that case just store the value and let
         * vmx_init() do the proper setup after enable_ept has been
         * established.
         */
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
                vmentry_l1d_flush_param = l1tf;
                return 0;
        }

        mutex_lock(&vmx_l1d_flush_mutex);
        ret = vmx_setup_l1d_flush(l1tf);
        mutex_unlock(&vmx_l1d_flush_mutex);
        return ret;
}

static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
        return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}

static const struct kernel_param_ops vmentry_l1d_flush_ops = {
        .set = vmentry_l1d_flush_set,
        .get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
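
/*
 * Administrator-facing example (not used by the code itself): since the
 * parameter is registered read-write (0644) above, the mitigation mode can
 * be chosen at boot with "kvm-intel.vmentry_l1d_flush=cond" or changed at
 * runtime, e.g.
 *
 *      echo always > /sys/module/kvm_intel/parameters/vmentry_l1d_flush
 */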

struct kvm_vmx {
        struct kvm kvm;

        unsigned int tss_addr;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;
};

#define NR_AUTOLOAD_MSRS 8

struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
        struct vmcs *vmcs;
        struct vmcs *shadow_vmcs;
        int cpu;
        bool launched;
        bool nmi_known_unmasked;
        unsigned long vmcs_host_cr3;    /* May not match real cr3 */
        unsigned long vmcs_host_cr4;    /* May not match real cr4 */
        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;
        ktime_t entry_time;
        s64 vnmi_blocked_time;
        unsigned long *msr_bitmap;
        struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
        unsigned index;
        u64 data;
        u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 *
 * IMPORTANT: Changing the layout of existing fields in this structure
 * will break save/restore compatibility with older kvm releases. When
 * adding new fields, either use space in the reserved padding* arrays
 * or add the new fields to the end of the structure.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
        /* According to the Intel spec, a VMCS region must start with the
         * following two fields. Then follow implementation-specific data.
         */
        u32 revision_id;
        u32 abort;

        u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
        u32 padding[7]; /* room for future expansion */

        u64 io_bitmap_a;
        u64 io_bitmap_b;
        u64 msr_bitmap;
        u64 vm_exit_msr_store_addr;
        u64 vm_exit_msr_load_addr;
        u64 vm_entry_msr_load_addr;
        u64 tsc_offset;
        u64 virtual_apic_page_addr;
        u64 apic_access_addr;
        u64 posted_intr_desc_addr;
        u64 ept_pointer;
        u64 eoi_exit_bitmap0;
        u64 eoi_exit_bitmap1;
        u64 eoi_exit_bitmap2;
        u64 eoi_exit_bitmap3;
        u64 xss_exit_bitmap;
        u64 guest_physical_address;
        u64 vmcs_link_pointer;
        u64 guest_ia32_debugctl;
        u64 guest_ia32_pat;
        u64 guest_ia32_efer;
        u64 guest_ia32_perf_global_ctrl;
        u64 guest_pdptr0;
        u64 guest_pdptr1;
        u64 guest_pdptr2;
        u64 guest_pdptr3;
        u64 guest_bndcfgs;
        u64 host_ia32_pat;
        u64 host_ia32_efer;
        u64 host_ia32_perf_global_ctrl;
        u64 vmread_bitmap;
        u64 vmwrite_bitmap;
        u64 vm_function_control;
        u64 eptp_list_address;
        u64 pml_address;
        u64 padding64[3]; /* room for future expansion */
        /*
         * To allow migration of L1 (complete with its L2 guests) between
         * machines of different natural widths (32 or 64 bit), we cannot have
         * unsigned long fields with no explicit size. We use u64 (aliased
         * natural_width) instead. Luckily, x86 is little-endian.
         */
        natural_width cr0_guest_host_mask;
        natural_width cr4_guest_host_mask;
        natural_width cr0_read_shadow;
        natural_width cr4_read_shadow;
        natural_width cr3_target_value0;
        natural_width cr3_target_value1;
        natural_width cr3_target_value2;
        natural_width cr3_target_value3;
        natural_width exit_qualification;
        natural_width guest_linear_address;
        natural_width guest_cr0;
        natural_width guest_cr3;
        natural_width guest_cr4;
        natural_width guest_es_base;
        natural_width guest_cs_base;
        natural_width guest_ss_base;
        natural_width guest_ds_base;
        natural_width guest_fs_base;
        natural_width guest_gs_base;
        natural_width guest_ldtr_base;
        natural_width guest_tr_base;
        natural_width guest_gdtr_base;
        natural_width guest_idtr_base;
        natural_width guest_dr7;
        natural_width guest_rsp;
        natural_width guest_rip;
        natural_width guest_rflags;
        natural_width guest_pending_dbg_exceptions;
        natural_width guest_sysenter_esp;
        natural_width guest_sysenter_eip;
        natural_width host_cr0;
        natural_width host_cr3;
        natural_width host_cr4;
        natural_width host_fs_base;
        natural_width host_gs_base;
        natural_width host_tr_base;
        natural_width host_gdtr_base;
        natural_width host_idtr_base;
        natural_width host_ia32_sysenter_esp;
        natural_width host_ia32_sysenter_eip;
        natural_width host_rsp;
        natural_width host_rip;
        natural_width paddingl[8]; /* room for future expansion */
        u32 pin_based_vm_exec_control;
        u32 cpu_based_vm_exec_control;
        u32 exception_bitmap;
        u32 page_fault_error_code_mask;
        u32 page_fault_error_code_match;
        u32 cr3_target_count;
        u32 vm_exit_controls;
        u32 vm_exit_msr_store_count;
        u32 vm_exit_msr_load_count;
        u32 vm_entry_controls;
        u32 vm_entry_msr_load_count;
        u32 vm_entry_intr_info_field;
        u32 vm_entry_exception_error_code;
        u32 vm_entry_instruction_len;
        u32 tpr_threshold;
        u32 secondary_vm_exec_control;
        u32 vm_instruction_error;
        u32 vm_exit_reason;
        u32 vm_exit_intr_info;
        u32 vm_exit_intr_error_code;
        u32 idt_vectoring_info_field;
        u32 idt_vectoring_error_code;
        u32 vm_exit_instruction_len;
        u32 vmx_instruction_info;
        u32 guest_es_limit;
        u32 guest_cs_limit;
        u32 guest_ss_limit;
        u32 guest_ds_limit;
        u32 guest_fs_limit;
        u32 guest_gs_limit;
        u32 guest_ldtr_limit;
        u32 guest_tr_limit;
        u32 guest_gdtr_limit;
        u32 guest_idtr_limit;
        u32 guest_es_ar_bytes;
        u32 guest_cs_ar_bytes;
        u32 guest_ss_ar_bytes;
        u32 guest_ds_ar_bytes;
        u32 guest_fs_ar_bytes;
        u32 guest_gs_ar_bytes;
        u32 guest_ldtr_ar_bytes;
        u32 guest_tr_ar_bytes;
        u32 guest_interruptibility_info;
        u32 guest_activity_state;
        u32 guest_sysenter_cs;
        u32 host_ia32_sysenter_cs;
        u32 vmx_preemption_timer_value;
        u32 padding32[7]; /* room for future expansion */
        u16 virtual_processor_id;
        u16 posted_intr_nv;
        u16 guest_es_selector;
        u16 guest_cs_selector;
        u16 guest_ss_selector;
        u16 guest_ds_selector;
        u16 guest_fs_selector;
        u16 guest_gs_selector;
        u16 guest_ldtr_selector;
        u16 guest_tr_selector;
        u16 guest_intr_status;
        u16 host_es_selector;
        u16 host_cs_selector;
        u16 host_ss_selector;
        u16 host_ds_selector;
        u16 host_fs_selector;
        u16 host_gs_selector;
        u16 host_tr_selector;
        u16 guest_pml_index;
};

/*
 * For save/restore compatibility, the vmcs12 field offsets must not change.
 */
#define CHECK_OFFSET(field, loc)                                \
        BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc),      \
                "Offset of " #field " in struct vmcs12 has changed.")

static inline void vmx_check_vmcs12_offsets(void) {
        CHECK_OFFSET(revision_id, 0);
        CHECK_OFFSET(abort, 4);
        CHECK_OFFSET(launch_state, 8);
        CHECK_OFFSET(io_bitmap_a, 40);
        CHECK_OFFSET(io_bitmap_b, 48);
        CHECK_OFFSET(msr_bitmap, 56);
        CHECK_OFFSET(vm_exit_msr_store_addr, 64);
        CHECK_OFFSET(vm_exit_msr_load_addr, 72);
        CHECK_OFFSET(vm_entry_msr_load_addr, 80);
        CHECK_OFFSET(tsc_offset, 88);
        CHECK_OFFSET(virtual_apic_page_addr, 96);
        CHECK_OFFSET(apic_access_addr, 104);
        CHECK_OFFSET(posted_intr_desc_addr, 112);
        CHECK_OFFSET(ept_pointer, 120);
        CHECK_OFFSET(eoi_exit_bitmap0, 128);
        CHECK_OFFSET(eoi_exit_bitmap1, 136);
        CHECK_OFFSET(eoi_exit_bitmap2, 144);
        CHECK_OFFSET(eoi_exit_bitmap3, 152);
        CHECK_OFFSET(xss_exit_bitmap, 160);
        CHECK_OFFSET(guest_physical_address, 168);
        CHECK_OFFSET(vmcs_link_pointer, 176);
        CHECK_OFFSET(guest_ia32_debugctl, 184);
        CHECK_OFFSET(guest_ia32_pat, 192);
        CHECK_OFFSET(guest_ia32_efer, 200);
        CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
        CHECK_OFFSET(guest_pdptr0, 216);
        CHECK_OFFSET(guest_pdptr1, 224);
        CHECK_OFFSET(guest_pdptr2, 232);
        CHECK_OFFSET(guest_pdptr3, 240);
        CHECK_OFFSET(guest_bndcfgs, 248);
        CHECK_OFFSET(host_ia32_pat, 256);
        CHECK_OFFSET(host_ia32_efer, 264);
        CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
        CHECK_OFFSET(vmread_bitmap, 280);
        CHECK_OFFSET(vmwrite_bitmap, 288);
        CHECK_OFFSET(vm_function_control, 296);
        CHECK_OFFSET(eptp_list_address, 304);
        CHECK_OFFSET(pml_address, 312);
        CHECK_OFFSET(cr0_guest_host_mask, 344);
        CHECK_OFFSET(cr4_guest_host_mask, 352);
        CHECK_OFFSET(cr0_read_shadow, 360);
        CHECK_OFFSET(cr4_read_shadow, 368);
        CHECK_OFFSET(cr3_target_value0, 376);
        CHECK_OFFSET(cr3_target_value1, 384);
        CHECK_OFFSET(cr3_target_value2, 392);
        CHECK_OFFSET(cr3_target_value3, 400);
        CHECK_OFFSET(exit_qualification, 408);
        CHECK_OFFSET(guest_linear_address, 416);
        CHECK_OFFSET(guest_cr0, 424);
        CHECK_OFFSET(guest_cr3, 432);
        CHECK_OFFSET(guest_cr4, 440);
        CHECK_OFFSET(guest_es_base, 448);
        CHECK_OFFSET(guest_cs_base, 456);
        CHECK_OFFSET(guest_ss_base, 464);
        CHECK_OFFSET(guest_ds_base, 472);
        CHECK_OFFSET(guest_fs_base, 480);
        CHECK_OFFSET(guest_gs_base, 488);
        CHECK_OFFSET(guest_ldtr_base, 496);
        CHECK_OFFSET(guest_tr_base, 504);
        CHECK_OFFSET(guest_gdtr_base, 512);
        CHECK_OFFSET(guest_idtr_base, 520);
        CHECK_OFFSET(guest_dr7, 528);
        CHECK_OFFSET(guest_rsp, 536);
        CHECK_OFFSET(guest_rip, 544);
        CHECK_OFFSET(guest_rflags, 552);
        CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
        CHECK_OFFSET(guest_sysenter_esp, 568);
        CHECK_OFFSET(guest_sysenter_eip, 576);
        CHECK_OFFSET(host_cr0, 584);
        CHECK_OFFSET(host_cr3, 592);
        CHECK_OFFSET(host_cr4, 600);
        CHECK_OFFSET(host_fs_base, 608);
        CHECK_OFFSET(host_gs_base, 616);
        CHECK_OFFSET(host_tr_base, 624);
        CHECK_OFFSET(host_gdtr_base, 632);
        CHECK_OFFSET(host_idtr_base, 640);
        CHECK_OFFSET(host_ia32_sysenter_esp, 648);
        CHECK_OFFSET(host_ia32_sysenter_eip, 656);
        CHECK_OFFSET(host_rsp, 664);
        CHECK_OFFSET(host_rip, 672);
        CHECK_OFFSET(pin_based_vm_exec_control, 744);
        CHECK_OFFSET(cpu_based_vm_exec_control, 748);
        CHECK_OFFSET(exception_bitmap, 752);
        CHECK_OFFSET(page_fault_error_code_mask, 756);
        CHECK_OFFSET(page_fault_error_code_match, 760);
        CHECK_OFFSET(cr3_target_count, 764);
        CHECK_OFFSET(vm_exit_controls, 768);
        CHECK_OFFSET(vm_exit_msr_store_count, 772);
        CHECK_OFFSET(vm_exit_msr_load_count, 776);
        CHECK_OFFSET(vm_entry_controls, 780);
        CHECK_OFFSET(vm_entry_msr_load_count, 784);
        CHECK_OFFSET(vm_entry_intr_info_field, 788);
        CHECK_OFFSET(vm_entry_exception_error_code, 792);
        CHECK_OFFSET(vm_entry_instruction_len, 796);
        CHECK_OFFSET(tpr_threshold, 800);
        CHECK_OFFSET(secondary_vm_exec_control, 804);
        CHECK_OFFSET(vm_instruction_error, 808);
        CHECK_OFFSET(vm_exit_reason, 812);
        CHECK_OFFSET(vm_exit_intr_info, 816);
        CHECK_OFFSET(vm_exit_intr_error_code, 820);
        CHECK_OFFSET(idt_vectoring_info_field, 824);
        CHECK_OFFSET(idt_vectoring_error_code, 828);
        CHECK_OFFSET(vm_exit_instruction_len, 832);
        CHECK_OFFSET(vmx_instruction_info, 836);
        CHECK_OFFSET(guest_es_limit, 840);
        CHECK_OFFSET(guest_cs_limit, 844);
        CHECK_OFFSET(guest_ss_limit, 848);
        CHECK_OFFSET(guest_ds_limit, 852);
        CHECK_OFFSET(guest_fs_limit, 856);
        CHECK_OFFSET(guest_gs_limit, 860);
        CHECK_OFFSET(guest_ldtr_limit, 864);
        CHECK_OFFSET(guest_tr_limit, 868);
        CHECK_OFFSET(guest_gdtr_limit, 872);
        CHECK_OFFSET(guest_idtr_limit, 876);
        CHECK_OFFSET(guest_es_ar_bytes, 880);
        CHECK_OFFSET(guest_cs_ar_bytes, 884);
        CHECK_OFFSET(guest_ss_ar_bytes, 888);
        CHECK_OFFSET(guest_ds_ar_bytes, 892);
        CHECK_OFFSET(guest_fs_ar_bytes, 896);
        CHECK_OFFSET(guest_gs_ar_bytes, 900);
        CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
        CHECK_OFFSET(guest_tr_ar_bytes, 908);
        CHECK_OFFSET(guest_interruptibility_info, 912);
        CHECK_OFFSET(guest_activity_state, 916);
        CHECK_OFFSET(guest_sysenter_cs, 920);
        CHECK_OFFSET(host_ia32_sysenter_cs, 924);
        CHECK_OFFSET(vmx_preemption_timer_value, 928);
        CHECK_OFFSET(virtual_processor_id, 960);
        CHECK_OFFSET(posted_intr_nv, 962);
        CHECK_OFFSET(guest_es_selector, 964);
        CHECK_OFFSET(guest_cs_selector, 966);
        CHECK_OFFSET(guest_ss_selector, 968);
        CHECK_OFFSET(guest_ds_selector, 970);
        CHECK_OFFSET(guest_fs_selector, 972);
        CHECK_OFFSET(guest_gs_selector, 974);
        CHECK_OFFSET(guest_ldtr_selector, 976);
        CHECK_OFFSET(guest_tr_selector, 978);
        CHECK_OFFSET(guest_intr_status, 980);
        CHECK_OFFSET(host_es_selector, 982);
        CHECK_OFFSET(host_cs_selector, 984);
        CHECK_OFFSET(host_ss_selector, 986);
        CHECK_OFFSET(host_ds_selector, 988);
        CHECK_OFFSET(host_fs_selector, 990);
        CHECK_OFFSET(host_gs_selector, 992);
        CHECK_OFFSET(host_tr_selector, 994);
        CHECK_OFFSET(guest_pml_index, 996);
}

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 *
 * IMPORTANT: Changing this value will break save/restore compatibility with
 * older kvm releases.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
 * the current implementation, 4K are reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000

/*
 * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
 * supported VMCS12 field encoding.
 */
#define VMCS12_MAX_FIELD_INDEX 0x17

struct nested_vmx_msrs {
        /*
         * We only store the "true" versions of the VMX capability MSRs. We
         * generate the "non-true" versions by setting the must-be-1 bits
         * according to the SDM.
         */
        u32 procbased_ctls_low;
        u32 procbased_ctls_high;
        u32 secondary_ctls_low;
        u32 secondary_ctls_high;
        u32 pinbased_ctls_low;
        u32 pinbased_ctls_high;
        u32 exit_ctls_low;
        u32 exit_ctls_high;
        u32 entry_ctls_low;
        u32 entry_ctls_high;
        u32 misc_low;
        u32 misc_high;
        u32 ept_caps;
        u32 vpid_caps;
        u64 basic;
        u64 cr0_fixed0;
        u64 cr0_fixed1;
        u64 cr4_fixed0;
        u64 cr4_fixed1;
        u64 vmcs_enum;
        u64 vmfunc_controls;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
        /* Has the level1 guest done vmxon? */
        bool vmxon;
        gpa_t vmxon_ptr;
        bool pml_full;

        /* The guest-physical address of the current VMCS L1 keeps for L2 */
        gpa_t current_vmptr;
        /*
         * Cache of the guest's VMCS, existing outside of guest memory.
         * Loaded from guest memory during VMPTRLD. Flushed to guest
         * memory during VMCLEAR and VMPTRLD.
         */
        struct vmcs12 *cached_vmcs12;
        /*
         * Indicates whether the shadow vmcs must be updated with the data
         * held by vmcs12.
         */
        bool sync_shadow_vmcs;
        bool dirty_vmcs12;

        bool change_vmcs01_virtual_apic_mode;

        /* L2 must run next, and mustn't decide to exit to L1. */
        bool nested_run_pending;

        struct loaded_vmcs vmcs02;

        /*
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
        struct page *apic_access_page;
        struct page *virtual_apic_page;
        struct page *pi_desc_page;
        struct pi_desc *pi_desc;
        bool pi_pending;
        u16 posted_intr_nv;

        struct hrtimer preemption_timer;
        bool preemption_timer_expired;

        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;

        u16 vpid02;
        u16 last_vpid;

        struct nested_vmx_msrs msrs;

        /* SMM related state */
        struct {
                /* in VMX operation on SMM entry? */
                bool vmxon;
                /* in guest mode on SMM entry? */
                bool guest_mode;
        } smm;
};

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

/* Posted-Interrupt Descriptor */
struct pi_desc {
        u32 pir[8];     /* Posted interrupt requested */
        union {
                struct {
                        /* bit 256 - Outstanding Notification */
                        u16 on : 1,
                        /* bit 257 - Suppress Notification */
                            sn : 1,
                        /* bit 271:258 - Reserved */
                            rsvd_1 : 14;
                        /* bit 279:272 - Notification Vector */
                        u8 nv;
                        /* bit 287:280 - Reserved */
                        u8 rsvd_2;
                        /* bit 319:288 - Notification Destination */
                        u32 ndst;
                };
                u64 control;
        };
        u32 rsvd[6];
} __aligned(64);

static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
        return test_and_set_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
        return test_and_clear_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
        return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
        return clear_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
        return set_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
        clear_bit(POSTED_INTR_ON,
                  (unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
        return test_bit(POSTED_INTR_ON,
                        (unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
        return test_bit(POSTED_INTR_SN,
                        (unsigned long *)&pi_desc->control);
}
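
/*
 * How the helpers above compose into delivery (an illustrative sketch; the
 * real sender is vmx_deliver_posted_interrupt() later in this file):
 *
 *      if (!pi_test_and_set_pir(vector, pi_desc) &&
 *          !pi_test_and_set_on(pi_desc))
 *              kick_the_target_cpu();  // PIR and ON were newly set, so no
 *                                      // notification IPI is in flight yet.
 */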

struct vmx_msrs {
        unsigned int nr;
        struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct vcpu_vmx {
        struct kvm_vcpu vcpu;
        unsigned long host_rsp;
        u8 fail;
        u8 msr_bitmap_mode;
        u32 exit_intr_info;
        u32 idt_vectoring_info;
        ulong rflags;
        struct shared_msr_entry *guest_msrs;
        int nmsrs;
        int save_nmsrs;
        unsigned long host_idt_base;
#ifdef CONFIG_X86_64
        u64 msr_host_kernel_gs_base;
        u64 msr_guest_kernel_gs_base;
#endif

        u64 arch_capabilities;
        u64 spec_ctrl;

        u32 vm_entry_controls_shadow;
        u32 vm_exit_controls_shadow;
        u32 secondary_exec_control;

        /*
         * loaded_vmcs points to the VMCS currently used in this vcpu. For a
         * non-nested (L1) guest, it always points to vmcs01. For a nested
         * guest (L2), it points to a different VMCS.
         */
        struct loaded_vmcs vmcs01;
        struct loaded_vmcs *loaded_vmcs;
        bool __launched; /* temporary, used in vmx_vcpu_run */
        struct msr_autoload {
                struct vmx_msrs guest;
                struct vmx_msrs host;
        } msr_autoload;
        struct {
                int loaded;
                u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
                u16 ds_sel, es_sel;
#endif
                int gs_ldt_reload_needed;
                int fs_reload_needed;
                u64 msr_host_bndcfgs;
        } host_state;
        struct {
                int vm86_active;
                ulong save_rflags;
                struct kvm_segment segs[8];
        } rmode;
        struct {
                u32 bitmask; /* 4 bits per segment (1 bit per field) */
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } seg[8];
        } segment_cache;
        int vpid;
        bool emulation_required;

        u32 exit_reason;

        /* Posted interrupt descriptor */
        struct pi_desc pi_desc;

        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;

        /* Dynamic PLE window. */
        int ple_window;
        bool ple_window_dirty;

        /* Support for PML */
#define PML_ENTITY_NUM 512
        struct page *pml_pg;

        /* apic deadline value in host tsc */
        u64 hv_deadline_tsc;

        u64 current_tsc_ratio;

        u32 host_pkru;

        unsigned long host_debugctlmsr;

        /*
         * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
         * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
         * in msr_ia32_feature_control_valid_bits.
         */
        u64 msr_ia32_feature_control;
        u64 msr_ia32_feature_control_valid_bits;
};

enum segment_cache_field {
        SEG_FIELD_SEL = 0,
        SEG_FIELD_BASE = 1,
        SEG_FIELD_LIMIT = 2,
        SEG_FIELD_AR = 3,

        SEG_FIELD_NR = 4
};

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
        return &(to_vmx(vcpu)->pi_desc);
}

#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name)
#define FIELD64(number, name)                                           \
        FIELD(number, name),                                            \
        [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
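
/*
 * Why the rotate (a descriptive note): in the architectural VMCS field
 * encoding the ordinal index sits in the low bits while the width/type
 * attribute bits sit near the top of the 16-bit word, so a table indexed
 * by the raw encoding would be huge and sparse. ROL16(field, 6) rotates
 * the attribute bits down next to the index, collapsing all supported
 * encodings into the small, dense index range used by
 * vmcs_field_to_offset_table below.
 */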


static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmx_shadow_fields.h"
};
static int max_shadow_read_only_fields =
        ARRAY_SIZE(shadow_read_only_fields);

static u16 shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "vmx_shadow_fields.h"
};
static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);

static const unsigned short vmcs_field_to_offset_table[] = {
        FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
        FIELD(POSTED_INTR_NV, posted_intr_nv),
        FIELD(GUEST_ES_SELECTOR, guest_es_selector),
        FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
        FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
        FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
        FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
        FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
        FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
        FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
        FIELD(GUEST_INTR_STATUS, guest_intr_status),
        FIELD(GUEST_PML_INDEX, guest_pml_index),
        FIELD(HOST_ES_SELECTOR, host_es_selector),
        FIELD(HOST_CS_SELECTOR, host_cs_selector),
        FIELD(HOST_SS_SELECTOR, host_ss_selector),
        FIELD(HOST_DS_SELECTOR, host_ds_selector),
        FIELD(HOST_FS_SELECTOR, host_fs_selector),
        FIELD(HOST_GS_SELECTOR, host_gs_selector),
        FIELD(HOST_TR_SELECTOR, host_tr_selector),
        FIELD64(IO_BITMAP_A, io_bitmap_a),
        FIELD64(IO_BITMAP_B, io_bitmap_b),
        FIELD64(MSR_BITMAP, msr_bitmap),
        FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
        FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
        FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
        FIELD64(PML_ADDRESS, pml_address),
        FIELD64(TSC_OFFSET, tsc_offset),
        FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
        FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
        FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
        FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
        FIELD64(EPT_POINTER, ept_pointer),
        FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
        FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
        FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
        FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
        FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
        FIELD64(VMREAD_BITMAP, vmread_bitmap),
        FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
        FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
        FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
        FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
        FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
        FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
        FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
        FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
        FIELD64(GUEST_PDPTR0, guest_pdptr0),
        FIELD64(GUEST_PDPTR1, guest_pdptr1),
        FIELD64(GUEST_PDPTR2, guest_pdptr2),
        FIELD64(GUEST_PDPTR3, guest_pdptr3),
        FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
        FIELD64(HOST_IA32_PAT, host_ia32_pat),
        FIELD64(HOST_IA32_EFER, host_ia32_efer),
        FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
        FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
        FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
        FIELD(EXCEPTION_BITMAP, exception_bitmap),
        FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
        FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
        FIELD(CR3_TARGET_COUNT, cr3_target_count),
        FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
        FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
        FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
        FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
        FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
        FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
        FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
        FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
        FIELD(TPR_THRESHOLD, tpr_threshold),
        FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
        FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
        FIELD(VM_EXIT_REASON, vm_exit_reason),
        FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
        FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
        FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
        FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
        FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
        FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
        FIELD(GUEST_ES_LIMIT, guest_es_limit),
        FIELD(GUEST_CS_LIMIT, guest_cs_limit),
        FIELD(GUEST_SS_LIMIT, guest_ss_limit),
        FIELD(GUEST_DS_LIMIT, guest_ds_limit),
        FIELD(GUEST_FS_LIMIT, guest_fs_limit),
        FIELD(GUEST_GS_LIMIT, guest_gs_limit),
        FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
        FIELD(GUEST_TR_LIMIT, guest_tr_limit),
        FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
        FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
        FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
        FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
        FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
        FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
        FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
        FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
        FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
        FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
        FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
        FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
        FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
        FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
        FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
        FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
        FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
        FIELD(CR0_READ_SHADOW, cr0_read_shadow),
        FIELD(CR4_READ_SHADOW, cr4_read_shadow),
        FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
        FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
        FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
        FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
        FIELD(EXIT_QUALIFICATION, exit_qualification),
        FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
        FIELD(GUEST_CR0, guest_cr0),
        FIELD(GUEST_CR3, guest_cr3),
        FIELD(GUEST_CR4, guest_cr4),
        FIELD(GUEST_ES_BASE, guest_es_base),
        FIELD(GUEST_CS_BASE, guest_cs_base),
        FIELD(GUEST_SS_BASE, guest_ss_base),
        FIELD(GUEST_DS_BASE, guest_ds_base),
        FIELD(GUEST_FS_BASE, guest_fs_base),
        FIELD(GUEST_GS_BASE, guest_gs_base),
        FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
        FIELD(GUEST_TR_BASE, guest_tr_base),
        FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
        FIELD(GUEST_IDTR_BASE, guest_idtr_base),
        FIELD(GUEST_DR7, guest_dr7),
        FIELD(GUEST_RSP, guest_rsp),
        FIELD(GUEST_RIP, guest_rip),
        FIELD(GUEST_RFLAGS, guest_rflags),
        FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
        FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
        FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
        FIELD(HOST_CR0, host_cr0),
        FIELD(HOST_CR3, host_cr3),
        FIELD(HOST_CR4, host_cr4),
        FIELD(HOST_FS_BASE, host_fs_base),
        FIELD(HOST_GS_BASE, host_gs_base),
        FIELD(HOST_TR_BASE, host_tr_base),
        FIELD(HOST_GDTR_BASE, host_gdtr_base),
        FIELD(HOST_IDTR_BASE, host_idtr_base),
        FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
        FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
        FIELD(HOST_RSP, host_rsp),
        FIELD(HOST_RIP, host_rip),
};

static inline short vmcs_field_to_offset(unsigned long field)
{
        const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
        unsigned short offset;
        unsigned index;

        if (field >> 15)
                return -ENOENT;

        index = ROL16(field, 6);
        if (index >= size)
                return -ENOENT;

        index = array_index_nospec(index, size);
        offset = vmcs_field_to_offset_table[index];
        if (offset == 0)
                return -ENOENT;
        return offset;
}
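
/*
 * Sketch of how callers use the offset (illustrative only; the real users
 * are the VMREAD/VMWRITE emulation helpers for nested guests, e.g.
 * vmcs12_read_any()):
 *
 *      short offset = vmcs_field_to_offset(field);
 *      if (offset < 0)
 *              return offset;                  // unsupported encoding
 *      p = (char *)vmcs12 + offset;            // field storage in vmcs12
 */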

static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
        return to_vmx(vcpu)->nested.cached_vmcs12;
}

static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
static bool vmx_xsaves_supported(void);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
                                            u16 error_code);
static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
                                                          u32 msr, int type);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs
 * loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

/*
 * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

Radim Krčmář23611332016-09-29 22:41:33 +02001223enum {
Radim Krčmář23611332016-09-29 22:41:33 +02001224 VMX_VMREAD_BITMAP,
1225 VMX_VMWRITE_BITMAP,
1226 VMX_BITMAP_NR
1227};
1228
1229static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
1230
Radim Krčmář23611332016-09-29 22:41:33 +02001231#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
1232#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
He, Qingfdef3ad2007-04-30 09:45:24 +03001233
Avi Kivity110312c2010-12-21 12:54:20 +02001234static bool cpu_has_load_ia32_efer;
Gleb Natapov8bf00a52011-10-05 14:01:22 +02001235static bool cpu_has_load_perf_global_ctrl;
Avi Kivity110312c2010-12-21 12:54:20 +02001236
Sheng Yang2384d2b2008-01-17 15:14:33 +08001237static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
1238static DEFINE_SPINLOCK(vmx_vpid_lock);
1239
Yang, Sheng1c3d14fe2007-07-29 11:07:42 +03001240static struct vmcs_config {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001241 int size;
1242 int order;
Jan Dakinevich9ac7e3e2016-09-04 21:23:15 +03001243 u32 basic_cap;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001244 u32 revision_id;
Yang, Sheng1c3d14fe2007-07-29 11:07:42 +03001245 u32 pin_based_exec_ctrl;
1246 u32 cpu_based_exec_ctrl;
Sheng Yangf78e0e22007-10-29 09:40:42 +08001247 u32 cpu_based_2nd_exec_ctrl;
Yang, Sheng1c3d14fe2007-07-29 11:07:42 +03001248 u32 vmexit_ctrl;
1249 u32 vmentry_ctrl;
Paolo Bonzini13893092018-02-26 13:40:09 +01001250 struct nested_vmx_msrs nested;
Yang, Sheng1c3d14fe2007-07-29 11:07:42 +03001251} vmcs_config;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001252
Hannes Ederefff9e52008-11-28 17:02:06 +01001253static struct vmx_capability {
Sheng Yangd56f5462008-04-25 10:13:16 +08001254 u32 ept;
1255 u32 vpid;
1256} vmx_capability;
1257
Avi Kivity6aa8b732006-12-10 02:21:36 -08001258#define VMX_SEGMENT_FIELD(seg) \
1259 [VCPU_SREG_##seg] = { \
1260 .selector = GUEST_##seg##_SELECTOR, \
1261 .base = GUEST_##seg##_BASE, \
1262 .limit = GUEST_##seg##_LIMIT, \
1263 .ar_bytes = GUEST_##seg##_AR_BYTES, \
1264 }
1265
Mathias Krause772e0312012-08-30 01:30:19 +02001266static const struct kvm_vmx_segment_field {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001267 unsigned selector;
1268 unsigned base;
1269 unsigned limit;
1270 unsigned ar_bytes;
1271} kvm_vmx_segment_fields[] = {
1272 VMX_SEGMENT_FIELD(CS),
1273 VMX_SEGMENT_FIELD(DS),
1274 VMX_SEGMENT_FIELD(ES),
1275 VMX_SEGMENT_FIELD(FS),
1276 VMX_SEGMENT_FIELD(GS),
1277 VMX_SEGMENT_FIELD(SS),
1278 VMX_SEGMENT_FIELD(TR),
1279 VMX_SEGMENT_FIELD(LDTR),
1280};
1281
Avi Kivity26bb0982009-09-07 11:14:12 +03001282static u64 host_efer;
1283
Avi Kivity6de4f3a2009-05-31 22:58:47 +03001284static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1285
Avi Kivity4d56c8a2007-04-19 14:28:44 +03001286/*
Brian Gerst8c065852010-07-17 09:03:26 -04001287 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
Avi Kivity4d56c8a2007-04-19 14:28:44 +03001288 * away by decrementing the array size.
1289 */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001290static const u32 vmx_msr_index[] = {
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001291#ifdef CONFIG_X86_64
Avi Kivity44ea2b12009-09-06 15:55:37 +03001292 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001293#endif
Brian Gerst8c065852010-07-17 09:03:26 -04001294 MSR_EFER, MSR_TSC_AUX, MSR_STAR,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001295};
Avi Kivity6aa8b732006-12-10 02:21:36 -08001296
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01001297DEFINE_STATIC_KEY_FALSE(enable_evmcs);
1298
1299#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
1300
1301#define KVM_EVMCS_VERSION 1
1302
1303#if IS_ENABLED(CONFIG_HYPERV)
1304static bool __read_mostly enlightened_vmcs = true;
1305module_param(enlightened_vmcs, bool, 0444);
1306
1307static inline void evmcs_write64(unsigned long field, u64 value)
1308{
1309 u16 clean_field;
1310 int offset = get_evmcs_offset(field, &clean_field);
1311
1312 if (offset < 0)
1313 return;
1314
1315 *(u64 *)((char *)current_evmcs + offset) = value;
1316
1317 current_evmcs->hv_clean_fields &= ~clean_field;
1318}
1319
1320static inline void evmcs_write32(unsigned long field, u32 value)
1321{
1322 u16 clean_field;
1323 int offset = get_evmcs_offset(field, &clean_field);
1324
1325 if (offset < 0)
1326 return;
1327
1328 *(u32 *)((char *)current_evmcs + offset) = value;
1329 current_evmcs->hv_clean_fields &= ~clean_field;
1330}
1331
1332static inline void evmcs_write16(unsigned long field, u16 value)
1333{
1334 u16 clean_field;
1335 int offset = get_evmcs_offset(field, &clean_field);
1336
1337 if (offset < 0)
1338 return;
1339
1340 *(u16 *)((char *)current_evmcs + offset) = value;
1341 current_evmcs->hv_clean_fields &= ~clean_field;
1342}
1343
1344static inline u64 evmcs_read64(unsigned long field)
1345{
1346 int offset = get_evmcs_offset(field, NULL);
1347
1348 if (offset < 0)
1349 return 0;
1350
1351 return *(u64 *)((char *)current_evmcs + offset);
1352}
1353
1354static inline u32 evmcs_read32(unsigned long field)
1355{
1356 int offset = get_evmcs_offset(field, NULL);
1357
1358 if (offset < 0)
1359 return 0;
1360
1361 return *(u32 *)((char *)current_evmcs + offset);
1362}
1363
1364static inline u16 evmcs_read16(unsigned long field)
1365{
1366 int offset = get_evmcs_offset(field, NULL);
1367
1368 if (offset < 0)
1369 return 0;
1370
1371 return *(u16 *)((char *)current_evmcs + offset);
1372}
1373
Vitaly Kuznetsovceef7d12018-04-16 12:50:33 +02001374static inline void evmcs_touch_msr_bitmap(void)
1375{
1376 if (unlikely(!current_evmcs))
1377 return;
1378
1379 if (current_evmcs->hv_enlightenments_control.msr_bitmap)
1380 current_evmcs->hv_clean_fields &=
1381 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
1382}
1383
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01001384static void evmcs_load(u64 phys_addr)
1385{
1386 struct hv_vp_assist_page *vp_ap =
1387 hv_get_vp_assist_page(smp_processor_id());
1388
1389 vp_ap->current_nested_vmcs = phys_addr;
1390 vp_ap->enlighten_vmentry = 1;
1391}
1392
1393static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
1394{
1395 /*
1396 * Enlightened VMCSv1 doesn't support these:
1397 *
1398 * POSTED_INTR_NV = 0x00000002,
1399 * GUEST_INTR_STATUS = 0x00000810,
1400 * APIC_ACCESS_ADDR = 0x00002014,
1401 * POSTED_INTR_DESC_ADDR = 0x00002016,
1402 * EOI_EXIT_BITMAP0 = 0x0000201c,
1403 * EOI_EXIT_BITMAP1 = 0x0000201e,
1404 * EOI_EXIT_BITMAP2 = 0x00002020,
1405 * EOI_EXIT_BITMAP3 = 0x00002022,
1406 */
1407 vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
1408 vmcs_conf->cpu_based_2nd_exec_ctrl &=
1409 ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1410 vmcs_conf->cpu_based_2nd_exec_ctrl &=
1411 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1412 vmcs_conf->cpu_based_2nd_exec_ctrl &=
1413 ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
1414
1415 /*
1416 * GUEST_PML_INDEX = 0x00000812,
1417 * PML_ADDRESS = 0x0000200e,
1418 */
1419 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
1420
1421 /* VM_FUNCTION_CONTROL = 0x00002018, */
1422 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
1423
1424 /*
1425 * EPTP_LIST_ADDRESS = 0x00002024,
1426 * VMREAD_BITMAP = 0x00002026,
1427 * VMWRITE_BITMAP = 0x00002028,
1428 */
1429 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
1430
1431 /*
1432 * TSC_MULTIPLIER = 0x00002032,
1433 */
1434 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
1435
1436 /*
1437 * PLE_GAP = 0x00004020,
1438 * PLE_WINDOW = 0x00004022,
1439 */
1440 vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1441
1442 /*
1443 * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
1444 */
1445 vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
1446
1447 /*
1448 * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
1449 * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
1450 */
1451 vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
1452 vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
1453
1454 /*
1455 * Currently unsupported in KVM:
1456 * GUEST_IA32_RTIT_CTL = 0x00002814,
1457 */
1458}
1459#else /* !IS_ENABLED(CONFIG_HYPERV) */
1460static inline void evmcs_write64(unsigned long field, u64 value) {}
1461static inline void evmcs_write32(unsigned long field, u32 value) {}
1462static inline void evmcs_write16(unsigned long field, u16 value) {}
1463static inline u64 evmcs_read64(unsigned long field) { return 0; }
1464static inline u32 evmcs_read32(unsigned long field) { return 0; }
1465static inline u16 evmcs_read16(unsigned long field) { return 0; }
1466static inline void evmcs_load(u64 phys_addr) {}
1467static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
Vitaly Kuznetsovceef7d12018-04-16 12:50:33 +02001468static inline void evmcs_touch_msr_bitmap(void) {}
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01001469#endif /* IS_ENABLED(CONFIG_HYPERV) */
1470
Jan Kiszka5bb16012016-02-09 20:14:21 +01001471static inline bool is_exception_n(u32 intr_info, u8 vector)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001472{
1473 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1474 INTR_INFO_VALID_MASK)) ==
Jan Kiszka5bb16012016-02-09 20:14:21 +01001475 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1476}
1477
Jan Kiszka6f054852016-02-09 20:15:18 +01001478static inline bool is_debug(u32 intr_info)
1479{
1480 return is_exception_n(intr_info, DB_VECTOR);
1481}
1482
1483static inline bool is_breakpoint(u32 intr_info)
1484{
1485 return is_exception_n(intr_info, BP_VECTOR);
1486}
1487
Jan Kiszka5bb16012016-02-09 20:14:21 +01001488static inline bool is_page_fault(u32 intr_info)
1489{
1490 return is_exception_n(intr_info, PF_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001491}
1492
Gui Jianfeng31299942010-03-15 17:29:09 +08001493static inline bool is_no_device(u32 intr_info)
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001494{
Jan Kiszka5bb16012016-02-09 20:14:21 +01001495 return is_exception_n(intr_info, NM_VECTOR);
Anthony Liguori2ab455c2007-04-27 09:29:49 +03001496}
1497
Gui Jianfeng31299942010-03-15 17:29:09 +08001498static inline bool is_invalid_opcode(u32 intr_info)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001499{
Jan Kiszka5bb16012016-02-09 20:14:21 +01001500 return is_exception_n(intr_info, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001501}
1502
Liran Alon9e869482018-03-12 13:12:51 +02001503static inline bool is_gp_fault(u32 intr_info)
1504{
1505 return is_exception_n(intr_info, GP_VECTOR);
1506}
1507
Gui Jianfeng31299942010-03-15 17:29:09 +08001508static inline bool is_external_interrupt(u32 intr_info)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001509{
1510 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1511 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1512}
1513
Gui Jianfeng31299942010-03-15 17:29:09 +08001514static inline bool is_machine_check(u32 intr_info)
Andi Kleena0861c02009-06-08 17:37:09 +08001515{
1516 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1517 INTR_INFO_VALID_MASK)) ==
1518 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1519}
1520
Linus Torvalds32d43cd2018-03-20 12:16:59 -07001521/* Undocumented: icebp/int1 */
1522static inline bool is_icebp(u32 intr_info)
1523{
1524 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1525 == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
1526}
1527
Gui Jianfeng31299942010-03-15 17:29:09 +08001528static inline bool cpu_has_vmx_msr_bitmap(void)
Sheng Yang25c5f222008-03-28 13:18:56 +08001529{
Sheng Yang04547152009-04-01 15:52:31 +08001530 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
Sheng Yang25c5f222008-03-28 13:18:56 +08001531}
1532
Gui Jianfeng31299942010-03-15 17:29:09 +08001533static inline bool cpu_has_vmx_tpr_shadow(void)
Yang, Sheng6e5d8652007-09-12 18:03:11 +08001534{
Sheng Yang04547152009-04-01 15:52:31 +08001535 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
Yang, Sheng6e5d8652007-09-12 18:03:11 +08001536}
1537
Paolo Bonzini35754c92015-07-29 12:05:37 +02001538static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
Yang, Sheng6e5d8652007-09-12 18:03:11 +08001539{
Paolo Bonzini35754c92015-07-29 12:05:37 +02001540 return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
Yang, Sheng6e5d8652007-09-12 18:03:11 +08001541}
1542
Gui Jianfeng31299942010-03-15 17:29:09 +08001543static inline bool cpu_has_secondary_exec_ctrls(void)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001544{
Sheng Yang04547152009-04-01 15:52:31 +08001545 return vmcs_config.cpu_based_exec_ctrl &
1546 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
Sheng Yangf78e0e22007-10-29 09:40:42 +08001547}
1548
Avi Kivity774ead32007-12-26 13:57:04 +02001549static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001550{
Sheng Yang04547152009-04-01 15:52:31 +08001551 return vmcs_config.cpu_based_2nd_exec_ctrl &
1552 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1553}
1554
Yang Zhang8d146952013-01-25 10:18:50 +08001555static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1556{
1557 return vmcs_config.cpu_based_2nd_exec_ctrl &
1558 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1559}
1560
Yang Zhang83d4c282013-01-25 10:18:49 +08001561static inline bool cpu_has_vmx_apic_register_virt(void)
1562{
1563 return vmcs_config.cpu_based_2nd_exec_ctrl &
1564 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1565}
1566
Yang Zhangc7c9c562013-01-25 10:18:51 +08001567static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1568{
1569 return vmcs_config.cpu_based_2nd_exec_ctrl &
1570 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1571}
1572
Yunhong Jiang64672c92016-06-13 14:19:59 -07001573/*
1574 * Comment format: document - errata name - stepping - processor name.
1575 * List taken from
1576 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1577 */
1578static u32 vmx_preemption_cpu_tfms[] = {
1579/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
15800x000206E6,
1581/* 323056.pdf - AAX65 - C2 - Xeon L3406 */
1582/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1583/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
15840x00020652,
1585/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
15860x00020655,
1587/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
1588/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
1589/*
1590 * 320767.pdf - AAP86 - B1 -
1591 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1592 */
15930x000106E5,
1594/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
15950x000106A0,
1596/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
15970x000106A1,
1598/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
15990x000106A4,
1600 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1601 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1602 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
16030x000106A5,
1604};
1605
1606static inline bool cpu_has_broken_vmx_preemption_timer(void)
1607{
1608 u32 eax = cpuid_eax(0x00000001), i;
1609
1610 /* Clear the reserved bits */
1611 eax &= ~(0x3U << 14 | 0xfU << 28);
Wei Yongjun03f6a222016-07-04 15:13:07 +00001612 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
Yunhong Jiang64672c92016-06-13 14:19:59 -07001613 if (eax == vmx_preemption_cpu_tfms[i])
1614 return true;
1615
1616 return false;
1617}
1618
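/*
 * Illustrative decoder, not part of the original file: the masking above
 * clears the bits CPUID.01H:EAX documents as reserved (14-15 and 28-31),
 * leaving the processor signature that the vmx_preemption_cpu_tfms[]
 * entries encode. Decoded per the usual convention:
 */
static inline void example_decode_signature(u32 eax, u32 *family, u32 *model,
					    u32 *stepping)
{
	*stepping = eax & 0xf;			/* bits 0-3  */
	*model = (eax >> 4) & 0xf;		/* bits 4-7  */
	*family = (eax >> 8) & 0xf;		/* bits 8-11 */
	if (*family == 0xf)
		*family += (eax >> 20) & 0xff;	/* extended family */
	if (*family == 0x6 || *family >= 0xf)
		*model |= ((eax >> 16) & 0xf) << 4;	/* extended model */
}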
1619static inline bool cpu_has_vmx_preemption_timer(void)
1620{
Yunhong Jiang64672c92016-06-13 14:19:59 -07001621 return vmcs_config.pin_based_exec_ctrl &
1622 PIN_BASED_VMX_PREEMPTION_TIMER;
1623}
1624
Yang Zhang01e439b2013-04-11 19:25:12 +08001625static inline bool cpu_has_vmx_posted_intr(void)
1626{
Paolo Bonzinid6a858d2015-09-28 11:58:14 +02001627 return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1628 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
Yang Zhang01e439b2013-04-11 19:25:12 +08001629}
1630
1631static inline bool cpu_has_vmx_apicv(void)
1632{
1633 return cpu_has_vmx_apic_register_virt() &&
1634 cpu_has_vmx_virtual_intr_delivery() &&
1635 cpu_has_vmx_posted_intr();
1636}
1637
Sheng Yang04547152009-04-01 15:52:31 +08001638static inline bool cpu_has_vmx_flexpriority(void)
1639{
1640 return cpu_has_vmx_tpr_shadow() &&
1641 cpu_has_vmx_virtualize_apic_accesses();
Sheng Yangf78e0e22007-10-29 09:40:42 +08001642}
1643
Marcelo Tosattie7997942009-06-11 12:07:40 -03001644static inline bool cpu_has_vmx_ept_execute_only(void)
1645{
Gui Jianfeng31299942010-03-15 17:29:09 +08001646 return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
Marcelo Tosattie7997942009-06-11 12:07:40 -03001647}
1648
Marcelo Tosattie7997942009-06-11 12:07:40 -03001649static inline bool cpu_has_vmx_ept_2m_page(void)
1650{
Gui Jianfeng31299942010-03-15 17:29:09 +08001651 return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
Marcelo Tosattie7997942009-06-11 12:07:40 -03001652}
1653
Sheng Yang878403b2010-01-05 19:02:29 +08001654static inline bool cpu_has_vmx_ept_1g_page(void)
1655{
Gui Jianfeng31299942010-03-15 17:29:09 +08001656 return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
Sheng Yang878403b2010-01-05 19:02:29 +08001657}
1658
Sheng Yang4bc9b982010-06-02 14:05:24 +08001659static inline bool cpu_has_vmx_ept_4levels(void)
1660{
1661 return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1662}
1663
David Hildenbrand42aa53b2017-08-10 23:15:29 +02001664static inline bool cpu_has_vmx_ept_mt_wb(void)
1665{
1666 return vmx_capability.ept & VMX_EPTP_WB_BIT;
1667}
1668
Yu Zhang855feb62017-08-24 20:27:55 +08001669static inline bool cpu_has_vmx_ept_5levels(void)
1670{
1671 return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1672}
1673
Xudong Hao83c3a332012-05-28 19:33:35 +08001674static inline bool cpu_has_vmx_ept_ad_bits(void)
1675{
1676 return vmx_capability.ept & VMX_EPT_AD_BIT;
1677}
1678
Gui Jianfeng31299942010-03-15 17:29:09 +08001679static inline bool cpu_has_vmx_invept_context(void)
Sheng Yangd56f5462008-04-25 10:13:16 +08001680{
Gui Jianfeng31299942010-03-15 17:29:09 +08001681 return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
Sheng Yangd56f5462008-04-25 10:13:16 +08001682}
1683
Gui Jianfeng31299942010-03-15 17:29:09 +08001684static inline bool cpu_has_vmx_invept_global(void)
Sheng Yangd56f5462008-04-25 10:13:16 +08001685{
Gui Jianfeng31299942010-03-15 17:29:09 +08001686 return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
Sheng Yangd56f5462008-04-25 10:13:16 +08001687}
1688
Liran Aloncd9a4912018-05-22 17:16:15 +03001689static inline bool cpu_has_vmx_invvpid_individual_addr(void)
1690{
1691 return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
1692}
1693
Gui Jianfeng518c8ae2010-06-04 08:51:39 +08001694static inline bool cpu_has_vmx_invvpid_single(void)
1695{
1696 return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1697}
1698
Gui Jianfengb9d762f2010-06-07 10:32:29 +08001699static inline bool cpu_has_vmx_invvpid_global(void)
1700{
1701 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1702}
1703
Wanpeng Li08d839c2017-03-23 05:30:08 -07001704static inline bool cpu_has_vmx_invvpid(void)
1705{
1706 return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1707}
1708
Gui Jianfeng31299942010-03-15 17:29:09 +08001709static inline bool cpu_has_vmx_ept(void)
Sheng Yangd56f5462008-04-25 10:13:16 +08001710{
Sheng Yang04547152009-04-01 15:52:31 +08001711 return vmcs_config.cpu_based_2nd_exec_ctrl &
1712 SECONDARY_EXEC_ENABLE_EPT;
Sheng Yangd56f5462008-04-25 10:13:16 +08001713}
1714
Gui Jianfeng31299942010-03-15 17:29:09 +08001715static inline bool cpu_has_vmx_unrestricted_guest(void)
Nitin A Kamble3a624e22009-06-08 11:34:16 -07001716{
1717 return vmcs_config.cpu_based_2nd_exec_ctrl &
1718 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1719}
1720
Gui Jianfeng31299942010-03-15 17:29:09 +08001721static inline bool cpu_has_vmx_ple(void)
Zhai, Edwin4b8d54f2009-10-09 18:03:20 +08001722{
1723 return vmcs_config.cpu_based_2nd_exec_ctrl &
1724 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1725}
1726
Jan Dakinevich9ac7e3e2016-09-04 21:23:15 +03001727static inline bool cpu_has_vmx_basic_inout(void)
1728{
1729 return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1730}
1731
Paolo Bonzini35754c92015-07-29 12:05:37 +02001732static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001733{
Paolo Bonzini35754c92015-07-29 12:05:37 +02001734 return flexpriority_enabled && lapic_in_kernel(vcpu);
Sheng Yangf78e0e22007-10-29 09:40:42 +08001735}
1736
Gui Jianfeng31299942010-03-15 17:29:09 +08001737static inline bool cpu_has_vmx_vpid(void)
Sheng Yang2384d2b2008-01-17 15:14:33 +08001738{
Sheng Yang04547152009-04-01 15:52:31 +08001739 return vmcs_config.cpu_based_2nd_exec_ctrl &
1740 SECONDARY_EXEC_ENABLE_VPID;
Sheng Yang2384d2b2008-01-17 15:14:33 +08001741}
1742
Gui Jianfeng31299942010-03-15 17:29:09 +08001743static inline bool cpu_has_vmx_rdtscp(void)
Sheng Yang4e47c7a2009-12-18 16:48:47 +08001744{
1745 return vmcs_config.cpu_based_2nd_exec_ctrl &
1746 SECONDARY_EXEC_RDTSCP;
1747}
1748
Mao, Junjiead756a12012-07-02 01:18:48 +00001749static inline bool cpu_has_vmx_invpcid(void)
1750{
1751 return vmcs_config.cpu_based_2nd_exec_ctrl &
1752 SECONDARY_EXEC_ENABLE_INVPCID;
1753}
1754
Paolo Bonzini8a1b4392017-11-06 13:31:12 +01001755static inline bool cpu_has_virtual_nmis(void)
1756{
1757 return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1758}
1759
Sheng Yangf5f48ee2010-06-30 12:25:15 +08001760static inline bool cpu_has_vmx_wbinvd_exit(void)
1761{
1762 return vmcs_config.cpu_based_2nd_exec_ctrl &
1763 SECONDARY_EXEC_WBINVD_EXITING;
1764}
1765
Abel Gordonabc4fc52013-04-18 14:35:25 +03001766static inline bool cpu_has_vmx_shadow_vmcs(void)
1767{
1768 u64 vmx_msr;
1769 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1770 /* check if the cpu supports writing r/o exit information fields */
1771 if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1772 return false;
1773
1774 return vmcs_config.cpu_based_2nd_exec_ctrl &
1775 SECONDARY_EXEC_SHADOW_VMCS;
1776}
1777
Kai Huang843e4332015-01-28 10:54:28 +08001778static inline bool cpu_has_vmx_pml(void)
1779{
1780 return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1781}
1782
Haozhong Zhang64903d62015-10-20 15:39:09 +08001783static inline bool cpu_has_vmx_tsc_scaling(void)
1784{
1785 return vmcs_config.cpu_based_2nd_exec_ctrl &
1786 SECONDARY_EXEC_TSC_SCALING;
1787}
1788
Bandan Das2a499e42017-08-03 15:54:41 -04001789static inline bool cpu_has_vmx_vmfunc(void)
1790{
1791 return vmcs_config.cpu_based_2nd_exec_ctrl &
1792 SECONDARY_EXEC_ENABLE_VMFUNC;
1793}
1794
Sean Christopherson64f7a112018-04-30 10:01:06 -07001795static bool vmx_umip_emulated(void)
1796{
1797 return vmcs_config.cpu_based_2nd_exec_ctrl &
1798 SECONDARY_EXEC_DESC;
1799}
1800
Sheng Yang04547152009-04-01 15:52:31 +08001801static inline bool report_flexpriority(void)
1802{
1803 return flexpriority_enabled;
1804}
1805
Jim Mattsonc7c2c702017-05-05 11:28:09 -07001806static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1807{
Paolo Bonzini6677f3d2018-02-26 13:40:08 +01001808 return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
Jim Mattsonc7c2c702017-05-05 11:28:09 -07001809}
1810
Jim Mattsonf4160e42018-05-29 09:11:33 -07001811/*
1812 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
1813 * to modify any valid field of the VMCS, or are the VM-exit
1814 * information fields read-only?
1815 */
1816static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
1817{
1818 return to_vmx(vcpu)->nested.msrs.misc_low &
1819 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
1820}
1821
Nadav Har'Elfe3ef052011-05-25 23:10:02 +03001822static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1823{
1824 return vmcs12->cpu_based_vm_exec_control & bit;
1825}
1826
1827static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1828{
1829 return (vmcs12->cpu_based_vm_exec_control &
1830 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1831 (vmcs12->secondary_vm_exec_control & bit);
1832}
1833
Jan Kiszkaf4124502014-03-07 20:03:13 +01001834static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1835{
1836 return vmcs12->pin_based_vm_exec_control &
1837 PIN_BASED_VMX_PREEMPTION_TIMER;
1838}
1839
Krish Sadhukhan0c7f6502018-02-20 21:24:39 -05001840static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
1841{
1842 return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
1843}
1844
1845static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1846{
1847 return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1848}
1849
Nadav Har'El155a97a2013-08-05 11:07:16 +03001850static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1851{
1852 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1853}
1854
Wanpeng Li81dc01f2014-12-04 19:11:07 +08001855static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1856{
Paolo Bonzini3db13482017-08-24 14:48:03 +02001857 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
Wanpeng Li81dc01f2014-12-04 19:11:07 +08001858}
1859
Bandan Dasc5f983f2017-05-05 15:25:14 -04001860static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1861{
1862 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1863}
1864
Wincy Vanf2b93282015-02-03 23:56:03 +08001865static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1866{
1867 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1868}
1869
Wanpeng Li5c614b32015-10-13 09:18:36 -07001870static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1871{
1872 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1873}
1874
Wincy Van82f0dd42015-02-03 23:57:18 +08001875static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1876{
1877 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1878}
1879
Wincy Van608406e2015-02-03 23:57:51 +08001880static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1881{
1882 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1883}
1884
Wincy Van705699a2015-02-03 23:58:17 +08001885static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1886{
1887 return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1888}
1889
Bandan Das27c42a12017-08-03 15:54:42 -04001890static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1891{
1892 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1893}
1894
Bandan Das41ab9372017-08-03 15:54:43 -04001895static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1896{
1897 return nested_cpu_has_vmfunc(vmcs12) &&
1898 (vmcs12->vm_function_control &
1899 VMX_VMFUNC_EPTP_SWITCHING);
1900}
1901
Jim Mattsonef85b672016-12-12 11:01:37 -08001902static inline bool is_nmi(u32 intr_info)
Nadav Har'El644d7112011-05-25 23:12:35 +03001903{
1904 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
Jim Mattsonef85b672016-12-12 11:01:37 -08001905 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
Nadav Har'El644d7112011-05-25 23:12:35 +03001906}
1907
Jan Kiszka533558b2014-01-04 18:47:20 +01001908static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1909 u32 exit_intr_info,
1910 unsigned long exit_qualification);
Nadav Har'El7c177932011-05-25 23:12:04 +03001911static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1912 struct vmcs12 *vmcs12,
1913 u32 reason, unsigned long qualification);
1914
Rusty Russell8b9cf982007-07-30 16:31:43 +10001915static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
Avi Kivity7725f0b2006-12-13 00:34:01 -08001916{
1917 int i;
1918
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001919 for (i = 0; i < vmx->nmsrs; ++i)
Avi Kivity26bb0982009-09-07 11:14:12 +03001920 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
Eddie Donga75beee2007-05-17 18:55:15 +03001921 return i;
1922 return -1;
1923}
1924
Sheng Yang2384d2b2008-01-17 15:14:33 +08001925static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1926{
1927 struct {
1928 u64 vpid : 16;
1929 u64 rsvd : 48;
1930 u64 gva;
1931 } operand = { vpid, 0, gva };
1932
Avi Kivity4ecac3f2008-05-13 13:23:38 +03001933 asm volatile (__ex(ASM_VMX_INVVPID)
Sheng Yang2384d2b2008-01-17 15:14:33 +08001934 /* CF==1 or ZF==1 --> rc = -1 */
1935 "; ja 1f ; ud2 ; 1:"
1936 : : "a"(&operand), "c"(ext) : "cc", "memory");
1937}
1938
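/*
 * Descriptor note (a sketch drawn from the SDM rather than the original
 * file): INVVPID and INVEPT both take the invalidation type in a register
 * and a 128-bit descriptor in memory -- {vpid, reserved, gva} here,
 * {eptp, gpa} in __invept() below. The "ja 1f ; ud2" sequence turns a
 * failed invalidation (CF=1 or ZF=1) into an immediate #UD rather than
 * silently continuing with stale TLB entries.
 */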
Sheng Yang14394422008-04-28 12:24:45 +08001939static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1940{
1941 struct {
1942 u64 eptp, gpa;
1943 } operand = {eptp, gpa};
1944
Avi Kivity4ecac3f2008-05-13 13:23:38 +03001945 asm volatile (__ex(ASM_VMX_INVEPT)
Sheng Yang14394422008-04-28 12:24:45 +08001946 /* CF==1 or ZF==1 --> rc = -1 */
1947 "; ja 1f ; ud2 ; 1:\n"
1948 : : "a" (&operand), "c" (ext) : "cc", "memory");
1949}
1950
Avi Kivity26bb0982009-09-07 11:14:12 +03001951static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
Eddie Donga75beee2007-05-17 18:55:15 +03001952{
1953 int i;
1954
Rusty Russell8b9cf982007-07-30 16:31:43 +10001955 i = __find_msr_index(vmx, msr);
Eddie Donga75beee2007-05-17 18:55:15 +03001956 if (i >= 0)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001957 return &vmx->guest_msrs[i];
Al Viro8b6d44c2007-02-09 16:38:40 +00001958 return NULL;
Avi Kivity7725f0b2006-12-13 00:34:01 -08001959}
1960
Avi Kivity6aa8b732006-12-10 02:21:36 -08001961static void vmcs_clear(struct vmcs *vmcs)
1962{
1963 u64 phys_addr = __pa(vmcs);
1964 u8 error;
1965
Avi Kivity4ecac3f2008-05-13 13:23:38 +03001966 asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
Avi Kivity16d8f722010-12-21 16:51:50 +02001967 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001968 : "cc", "memory");
1969 if (error)
1970 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1971 vmcs, phys_addr);
1972}
1973
Nadav Har'Eld462b812011-05-24 15:26:10 +03001974static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1975{
1976 vmcs_clear(loaded_vmcs->vmcs);
Jim Mattson355f4fb2016-10-28 08:29:39 -07001977 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1978 vmcs_clear(loaded_vmcs->shadow_vmcs);
Nadav Har'Eld462b812011-05-24 15:26:10 +03001979 loaded_vmcs->cpu = -1;
1980 loaded_vmcs->launched = 0;
1981}
1982
Dongxiao Xu7725b892010-05-11 18:29:38 +08001983static void vmcs_load(struct vmcs *vmcs)
1984{
1985 u64 phys_addr = __pa(vmcs);
1986 u8 error;
1987
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01001988 if (static_branch_unlikely(&enable_evmcs))
1989 return evmcs_load(phys_addr);
1990
Dongxiao Xu7725b892010-05-11 18:29:38 +08001991 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
Avi Kivity16d8f722010-12-21 16:51:50 +02001992 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
Dongxiao Xu7725b892010-05-11 18:29:38 +08001993 : "cc", "memory");
1994 if (error)
Nadav Har'El2844d842011-05-25 23:16:40 +03001995 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
Dongxiao Xu7725b892010-05-11 18:29:38 +08001996 vmcs, phys_addr);
1997}
1998
Dave Young2965faa2015-09-09 15:38:55 -07001999#ifdef CONFIG_KEXEC_CORE
Zhang Yanfei8f536b72012-12-06 23:43:34 +08002000/*
2001 * This bitmap indicates, per CPU, whether the vmclear
2002 * operation is enabled on that CPU. All CPUs are
2003 * disabled by default.
2004 */
2005static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
2006
2007static inline void crash_enable_local_vmclear(int cpu)
2008{
2009 cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
2010}
2011
2012static inline void crash_disable_local_vmclear(int cpu)
2013{
2014 cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
2015}
2016
2017static inline int crash_local_vmclear_enabled(int cpu)
2018{
2019 return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
2020}
2021
2022static void crash_vmclear_local_loaded_vmcss(void)
2023{
2024 int cpu = raw_smp_processor_id();
2025 struct loaded_vmcs *v;
2026
2027 if (!crash_local_vmclear_enabled(cpu))
2028 return;
2029
2030 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
2031 loaded_vmcss_on_cpu_link)
2032 vmcs_clear(v->vmcs);
2033}
2034#else
2035static inline void crash_enable_local_vmclear(int cpu) { }
2036static inline void crash_disable_local_vmclear(int cpu) { }
Dave Young2965faa2015-09-09 15:38:55 -07002037#endif /* CONFIG_KEXEC_CORE */
Zhang Yanfei8f536b72012-12-06 23:43:34 +08002038
Nadav Har'Eld462b812011-05-24 15:26:10 +03002039static void __loaded_vmcs_clear(void *arg)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002040{
Nadav Har'Eld462b812011-05-24 15:26:10 +03002041 struct loaded_vmcs *loaded_vmcs = arg;
Ingo Molnard3b2c332007-01-05 16:36:23 -08002042 int cpu = raw_smp_processor_id();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002043
Nadav Har'Eld462b812011-05-24 15:26:10 +03002044 if (loaded_vmcs->cpu != cpu)
2045 return; /* vcpu migration can race with cpu offline */
2046 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002047 per_cpu(current_vmcs, cpu) = NULL;
Zhang Yanfei8f536b72012-12-06 23:43:34 +08002048 crash_disable_local_vmclear(cpu);
Nadav Har'Eld462b812011-05-24 15:26:10 +03002049 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
Xiao Guangrong5a560f82012-11-28 20:54:14 +08002050
2051 /*
2052	 * Ensure that the update of loaded_vmcs->loaded_vmcss_on_cpu_link
2053	 * happens before loaded_vmcs->cpu is set to -1 in loaded_vmcs_init().
2054	 * Otherwise, another CPU could observe cpu == -1 first and add the
2055	 * VMCS back to its per-CPU list before it has been deleted here.
2056 */
2057 smp_wmb();
2058
Nadav Har'Eld462b812011-05-24 15:26:10 +03002059 loaded_vmcs_init(loaded_vmcs);
Zhang Yanfei8f536b72012-12-06 23:43:34 +08002060 crash_enable_local_vmclear(cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002061}
2062
Nadav Har'Eld462b812011-05-24 15:26:10 +03002063static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
Avi Kivity8d0be2b2007-02-12 00:54:46 -08002064{
Xiao Guangronge6c7d322012-11-28 20:53:15 +08002065 int cpu = loaded_vmcs->cpu;
2066
2067 if (cpu != -1)
2068 smp_call_function_single(cpu,
2069 __loaded_vmcs_clear, loaded_vmcs, 1);
Avi Kivity8d0be2b2007-02-12 00:54:46 -08002070}
2071
Wanpeng Lidd5f5342015-09-23 18:26:57 +08002072static inline void vpid_sync_vcpu_single(int vpid)
Sheng Yang2384d2b2008-01-17 15:14:33 +08002073{
Wanpeng Lidd5f5342015-09-23 18:26:57 +08002074 if (vpid == 0)
Sheng Yang2384d2b2008-01-17 15:14:33 +08002075 return;
2076
Gui Jianfeng518c8ae2010-06-04 08:51:39 +08002077 if (cpu_has_vmx_invvpid_single())
Wanpeng Lidd5f5342015-09-23 18:26:57 +08002078 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
Sheng Yang2384d2b2008-01-17 15:14:33 +08002079}
2080
Gui Jianfengb9d762f2010-06-07 10:32:29 +08002081static inline void vpid_sync_vcpu_global(void)
2082{
2083 if (cpu_has_vmx_invvpid_global())
2084 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
2085}
2086
Wanpeng Lidd5f5342015-09-23 18:26:57 +08002087static inline void vpid_sync_context(int vpid)
Gui Jianfengb9d762f2010-06-07 10:32:29 +08002088{
2089 if (cpu_has_vmx_invvpid_single())
Wanpeng Lidd5f5342015-09-23 18:26:57 +08002090 vpid_sync_vcpu_single(vpid);
Gui Jianfengb9d762f2010-06-07 10:32:29 +08002091 else
2092 vpid_sync_vcpu_global();
2093}
2094
Sheng Yang14394422008-04-28 12:24:45 +08002095static inline void ept_sync_global(void)
2096{
David Hildenbrandf5f51582017-08-24 20:51:30 +02002097 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
Sheng Yang14394422008-04-28 12:24:45 +08002098}
2099
2100static inline void ept_sync_context(u64 eptp)
2101{
David Hildenbrand0e1252d2017-08-24 20:51:28 +02002102 if (cpu_has_vmx_invept_context())
2103 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
2104 else
2105 ept_sync_global();
Sheng Yang14394422008-04-28 12:24:45 +08002106}
2107
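/*
 * Summary sketch of the flush helpers above (not in the original file):
 * both families prefer the narrowest invalidation the CPU advertises and
 * widen only when they must:
 *
 *	vpid_sync_context(vpid) -> INVVPID single-context if supported,
 *	                           else INVVPID all-context
 *	ept_sync_context(eptp)  -> INVEPT single-context if supported,
 *	                           else INVEPT global
 *
 * Over-invalidation is always correct, merely slower.
 */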
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002108static __always_inline void vmcs_check16(unsigned long field)
2109{
2110 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2111 "16-bit accessor invalid for 64-bit field");
2112 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2113 "16-bit accessor invalid for 64-bit high field");
2114 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2115 "16-bit accessor invalid for 32-bit high field");
2116 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2117 "16-bit accessor invalid for natural width field");
2118}
2119
2120static __always_inline void vmcs_check32(unsigned long field)
2121{
2122 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2123 "32-bit accessor invalid for 16-bit field");
2124 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2125 "32-bit accessor invalid for natural width field");
2126}
2127
2128static __always_inline void vmcs_check64(unsigned long field)
2129{
2130 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2131 "64-bit accessor invalid for 16-bit field");
2132 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2133 "64-bit accessor invalid for 64-bit high field");
2134 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2135 "64-bit accessor invalid for 32-bit field");
2136 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
2137 "64-bit accessor invalid for natural width field");
2138}
2139
2140static __always_inline void vmcs_checkl(unsigned long field)
2141{
2142 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
2143 "Natural width accessor invalid for 16-bit field");
2144 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
2145 "Natural width accessor invalid for 64-bit field");
2146 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
2147 "Natural width accessor invalid for 64-bit high field");
2148 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
2149 "Natural width accessor invalid for 32-bit field");
2150}
2151
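/*
 * Encoding sketch (from the SDM, not spelled out in the original file):
 * in a VMCS field encoding, bit 0 selects the high half of a 64-bit field
 * and bits 14:13 give the width, which is why the 0x6000/0x6001 masks in
 * the vmcs_check*() assertions above classify fields at compile time:
 *
 *	(field & 0x6000) == 0x0000	16-bit field
 *	(field & 0x6001) == 0x2000	64-bit field, full access
 *	(field & 0x6001) == 0x2001	64-bit field, high 32 bits
 *	(field & 0x6000) == 0x4000	32-bit field
 *	(field & 0x6000) == 0x6000	natural-width field
 */
static inline bool example_is_64bit_full_field(unsigned long field)
{
	return (field & 0x6001) == 0x2000;	/* e.g. IO_BITMAP_A, 0x2000 */
}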
2152static __always_inline unsigned long __vmcs_readl(unsigned long field)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002153{
Avi Kivity5e520e62011-05-15 10:13:12 -04002154 unsigned long value;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002155
Avi Kivity5e520e62011-05-15 10:13:12 -04002156 asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
2157 : "=a"(value) : "d"(field) : "cc");
Avi Kivity6aa8b732006-12-10 02:21:36 -08002158 return value;
2159}
2160
Avi Kivity96304212011-05-15 10:13:13 -04002161static __always_inline u16 vmcs_read16(unsigned long field)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002162{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002163 vmcs_check16(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002164 if (static_branch_unlikely(&enable_evmcs))
2165 return evmcs_read16(field);
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002166 return __vmcs_readl(field);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002167}
2168
Avi Kivity96304212011-05-15 10:13:13 -04002169static __always_inline u32 vmcs_read32(unsigned long field)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002170{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002171 vmcs_check32(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002172 if (static_branch_unlikely(&enable_evmcs))
2173 return evmcs_read32(field);
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002174 return __vmcs_readl(field);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002175}
2176
Avi Kivity96304212011-05-15 10:13:13 -04002177static __always_inline u64 vmcs_read64(unsigned long field)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002178{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002179 vmcs_check64(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002180 if (static_branch_unlikely(&enable_evmcs))
2181 return evmcs_read64(field);
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002182#ifdef CONFIG_X86_64
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002183 return __vmcs_readl(field);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002184#else
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002185 return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002186#endif
2187}
2188
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002189static __always_inline unsigned long vmcs_readl(unsigned long field)
2190{
2191 vmcs_checkl(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002192 if (static_branch_unlikely(&enable_evmcs))
2193 return evmcs_read64(field);
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002194 return __vmcs_readl(field);
2195}
2196
Avi Kivitye52de1b2007-01-05 16:36:56 -08002197static noinline void vmwrite_error(unsigned long field, unsigned long value)
2198{
2199 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
2200 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
2201 dump_stack();
2202}
2203
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002204static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002205{
2206 u8 error;
2207
Avi Kivity4ecac3f2008-05-13 13:23:38 +03002208 asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
Mike Dayd77c26f2007-10-08 09:02:08 -04002209 : "=q"(error) : "a"(value), "d"(field) : "cc");
Avi Kivitye52de1b2007-01-05 16:36:56 -08002210 if (unlikely(error))
2211 vmwrite_error(field, value);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002212}
2213
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002214static __always_inline void vmcs_write16(unsigned long field, u16 value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002215{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002216 vmcs_check16(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002217 if (static_branch_unlikely(&enable_evmcs))
2218 return evmcs_write16(field, value);
2219
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002220 __vmcs_writel(field, value);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002221}
2222
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002223static __always_inline void vmcs_write32(unsigned long field, u32 value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002224{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002225 vmcs_check32(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002226 if (static_branch_unlikely(&enable_evmcs))
2227 return evmcs_write32(field, value);
2228
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002229 __vmcs_writel(field, value);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002230}
2231
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002232static __always_inline void vmcs_write64(unsigned long field, u64 value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002233{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002234 vmcs_check64(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002235 if (static_branch_unlikely(&enable_evmcs))
2236 return evmcs_write64(field, value);
2237
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002238 __vmcs_writel(field, value);
Avi Kivity7682f2d2008-05-12 19:25:43 +03002239#ifndef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002240 asm volatile ("");
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002241 __vmcs_writel(field+1, value >> 32);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002242#endif
2243}
2244
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002245static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
Anthony Liguori2ab455c2007-04-27 09:29:49 +03002246{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002247 vmcs_checkl(field);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002248 if (static_branch_unlikely(&enable_evmcs))
2249 return evmcs_write64(field, value);
2250
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002251 __vmcs_writel(field, value);
Anthony Liguori2ab455c2007-04-27 09:29:49 +03002252}
2253
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002254static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
Anthony Liguori2ab455c2007-04-27 09:29:49 +03002255{
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002256 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2257 "vmcs_clear_bits does not support 64-bit fields");
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002258 if (static_branch_unlikely(&enable_evmcs))
2259 return evmcs_write32(field, evmcs_read32(field) & ~mask);
2260
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002261 __vmcs_writel(field, __vmcs_readl(field) & ~mask);
2262}
2263
2264static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
2265{
2266 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
2267 "vmcs_set_bits does not support 64-bit fields");
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +01002268 if (static_branch_unlikely(&enable_evmcs))
2269 return evmcs_write32(field, evmcs_read32(field) | mask);
2270
Paolo Bonzini8a86aea92015-12-03 15:56:55 +01002271 __vmcs_writel(field, __vmcs_readl(field) | mask);
Anthony Liguori2ab455c2007-04-27 09:29:49 +03002272}
2273
Paolo Bonzini8391ce42016-07-07 14:58:33 +02002274static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
2275{
2276 vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
2277}
2278
Gleb Natapov2961e8762013-11-25 15:37:13 +02002279static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
2280{
2281 vmcs_write32(VM_ENTRY_CONTROLS, val);
2282 vmx->vm_entry_controls_shadow = val;
2283}
2284
2285static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
2286{
2287 if (vmx->vm_entry_controls_shadow != val)
2288 vm_entry_controls_init(vmx, val);
2289}
2290
2291static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
2292{
2293 return vmx->vm_entry_controls_shadow;
2294}
2295
2296
2297static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2298{
2299 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
2300}
2301
2302static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2303{
2304 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
2305}
2306
Paolo Bonzini8391ce42016-07-07 14:58:33 +02002307static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
2308{
2309 vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
2310}
2311
Gleb Natapov2961e8762013-11-25 15:37:13 +02002312static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
2313{
2314 vmcs_write32(VM_EXIT_CONTROLS, val);
2315 vmx->vm_exit_controls_shadow = val;
2316}
2317
2318static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
2319{
2320 if (vmx->vm_exit_controls_shadow != val)
2321 vm_exit_controls_init(vmx, val);
2322}
2323
2324static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
2325{
2326 return vmx->vm_exit_controls_shadow;
2327}
2328
2329
2330static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
2331{
2332 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
2333}
2334
2335static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
2336{
2337 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
2338}
2339
Avi Kivity2fb92db2011-04-27 19:42:18 +03002340static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
2341{
2342 vmx->segment_cache.bitmask = 0;
2343}
2344
2345static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
2346 unsigned field)
2347{
2348 bool ret;
2349 u32 mask = 1 << (seg * SEG_FIELD_NR + field);
2350
2351 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
2352 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
2353 vmx->segment_cache.bitmask = 0;
2354 }
2355 ret = vmx->segment_cache.bitmask & mask;
2356 vmx->segment_cache.bitmask |= mask;
2357 return ret;
2358}
2359
2360static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
2361{
2362 u16 *p = &vmx->segment_cache.seg[seg].selector;
2363
2364 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
2365 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
2366 return *p;
2367}
2368
2369static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
2370{
2371 ulong *p = &vmx->segment_cache.seg[seg].base;
2372
2373 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
2374 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
2375 return *p;
2376}
2377
2378static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
2379{
2380 u32 *p = &vmx->segment_cache.seg[seg].limit;
2381
2382 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
2383 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
2384 return *p;
2385}
2386
2387static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
2388{
2389 u32 *p = &vmx->segment_cache.seg[seg].ar;
2390
2391 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
2392 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
2393 return *p;
2394}
2395
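/*
 * Usage sketch (illustrative, not from the original file): the segment
 * cache saves repeated VMREADs. vmx_segment_cache_test_set() reports
 * whether the (seg, field) pair was already cached and marks it cached
 * either way, so each accessor issues at most one VMREAD until
 * vmx_segment_cache_clear() invalidates the whole cache:
 *
 *	u16 sel = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS); // VMREAD
 *	sel = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS);     // cached
 */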
Avi Kivityabd3f2d2007-05-02 17:57:40 +03002396static void update_exception_bitmap(struct kvm_vcpu *vcpu)
2397{
2398 u32 eb;
2399
Jan Kiszkafd7373c2010-01-20 18:20:20 +01002400 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002401 (1u << DB_VECTOR) | (1u << AC_VECTOR);
Liran Alon9e869482018-03-12 13:12:51 +02002402 /*
2403 * Guest access to VMware backdoor ports could legitimately
2404 * trigger #GP because of TSS I/O permission bitmap.
2405 * We intercept those #GP and allow access to them anyway
2406 * as VMware does.
2407 */
2408 if (enable_vmware_backdoor)
2409 eb |= (1u << GP_VECTOR);
Jan Kiszkafd7373c2010-01-20 18:20:20 +01002410 if ((vcpu->guest_debug &
2411 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
2412 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
2413 eb |= 1u << BP_VECTOR;
Avi Kivity7ffd92c2009-06-09 14:10:45 +03002414 if (to_vmx(vcpu)->rmode.vm86_active)
Avi Kivityabd3f2d2007-05-02 17:57:40 +03002415 eb = ~0;
Avi Kivity089d0342009-03-23 18:26:32 +02002416 if (enable_ept)
Sheng Yang14394422008-04-28 12:24:45 +08002417 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
Nadav Har'El36cf24e2011-05-25 23:15:08 +03002418
2419 /* When we are running a nested L2 guest and L1 specified for it a
2420 * certain exception bitmap, we must trap the same exceptions and pass
2421 * them to L1. When running L2, we will only handle the exceptions
2422 * specified above if L1 did not want them.
2423 */
2424 if (is_guest_mode(vcpu))
2425 eb |= get_vmcs12(vcpu)->exception_bitmap;
2426
Avi Kivityabd3f2d2007-05-02 17:57:40 +03002427 vmcs_write32(EXCEPTION_BITMAP, eb);
2428}
2429
Ashok Raj15d45072018-02-01 22:59:43 +01002430/*
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +01002431 * Check whether writes to the MSR are intercepted in the currently loaded MSR bitmap.
2432 */
2433static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
2434{
2435 unsigned long *msr_bitmap;
2436 int f = sizeof(unsigned long);
2437
2438 if (!cpu_has_vmx_msr_bitmap())
2439 return true;
2440
2441 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
2442
2443 if (msr <= 0x1fff) {
2444 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2445 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2446 msr &= 0x1fff;
2447 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2448 }
2449
2450 return true;
2451}
2452
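/*
 * Layout sketch (per the SDM, not stated in the original file): the
 * 4-KByte MSR bitmap consists of four 1-KByte regions, which is where the
 * 0x800 and 0xc00 byte offsets come from:
 *
 *	0x000	reads,  MSRs 0x00000000 - 0x00001fff
 *	0x400	reads,  MSRs 0xc0000000 - 0xc0001fff
 *	0x800	writes, MSRs 0x00000000 - 0x00001fff
 *	0xc00	writes, MSRs 0xc0000000 - 0xc0001fff
 *
 * msr_write_intercepted() above and msr_write_intercepted_l01() below
 * therefore consult only the two write regions.
 */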
2453/*
Ashok Raj15d45072018-02-01 22:59:43 +01002454 * Check whether writes to the MSR are intercepted in the L01 MSR bitmap.
2455 */
2456static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
2457{
2458 unsigned long *msr_bitmap;
2459 int f = sizeof(unsigned long);
2460
2461 if (!cpu_has_vmx_msr_bitmap())
2462 return true;
2463
2464 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
2465
2466 if (msr <= 0x1fff) {
2467 return !!test_bit(msr, msr_bitmap + 0x800 / f);
2468 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2469 msr &= 0x1fff;
2470 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
2471 }
2472
2473 return true;
2474}
2475
Gleb Natapov2961e8762013-11-25 15:37:13 +02002476static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2477 unsigned long entry, unsigned long exit)
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002478{
Gleb Natapov2961e8762013-11-25 15:37:13 +02002479 vm_entry_controls_clearbit(vmx, entry);
2480 vm_exit_controls_clearbit(vmx, exit);
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002481}
2482
Konrad Rzeszutek Wilkca83b4a2018-06-20 20:11:39 -04002483static int find_msr(struct vmx_msrs *m, unsigned int msr)
2484{
2485 unsigned int i;
2486
2487 for (i = 0; i < m->nr; ++i) {
2488 if (m->val[i].index == msr)
2489 return i;
2490 }
2491 return -ENOENT;
2492}
2493
Avi Kivity61d2ef22010-04-28 16:40:38 +03002494static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
2495{
Konrad Rzeszutek Wilkca83b4a2018-06-20 20:11:39 -04002496 int i;
Avi Kivity61d2ef22010-04-28 16:40:38 +03002497 struct msr_autoload *m = &vmx->msr_autoload;
2498
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002499 switch (msr) {
2500 case MSR_EFER:
2501 if (cpu_has_load_ia32_efer) {
Gleb Natapov2961e8762013-11-25 15:37:13 +02002502 clear_atomic_switch_msr_special(vmx,
2503 VM_ENTRY_LOAD_IA32_EFER,
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002504 VM_EXIT_LOAD_IA32_EFER);
2505 return;
2506 }
2507 break;
2508 case MSR_CORE_PERF_GLOBAL_CTRL:
2509 if (cpu_has_load_perf_global_ctrl) {
Gleb Natapov2961e8762013-11-25 15:37:13 +02002510 clear_atomic_switch_msr_special(vmx,
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002511 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2512 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2513 return;
2514 }
2515 break;
Avi Kivity110312c2010-12-21 12:54:20 +02002516 }
Konrad Rzeszutek Wilkca83b4a2018-06-20 20:11:39 -04002517 i = find_msr(&m->guest, msr);
2518 if (i < 0)
Konrad Rzeszutek Wilk31907092018-06-20 22:00:47 -04002519 goto skip_guest;
Konrad Rzeszutek Wilk33966dd62018-06-20 13:58:37 -04002520 --m->guest.nr;
Konrad Rzeszutek Wilk33966dd62018-06-20 13:58:37 -04002521 m->guest.val[i] = m->guest.val[m->guest.nr];
Konrad Rzeszutek Wilk33966dd62018-06-20 13:58:37 -04002522 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
Konrad Rzeszutek Wilk31907092018-06-20 22:00:47 -04002523
2524skip_guest:
2525 i = find_msr(&m->host, msr);
2526 if (i < 0)
2527 return;
2528
2529 --m->host.nr;
2530 m->host.val[i] = m->host.val[m->host.nr];
Konrad Rzeszutek Wilk33966dd62018-06-20 13:58:37 -04002531 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
Avi Kivity61d2ef22010-04-28 16:40:38 +03002532}
2533
Gleb Natapov2961e8762013-11-25 15:37:13 +02002534static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
2535 unsigned long entry, unsigned long exit,
2536 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
2537 u64 guest_val, u64 host_val)
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002538{
2539 vmcs_write64(guest_val_vmcs, guest_val);
2540 vmcs_write64(host_val_vmcs, host_val);
Gleb Natapov2961e8762013-11-25 15:37:13 +02002541 vm_entry_controls_setbit(vmx, entry);
2542 vm_exit_controls_setbit(vmx, exit);
Gleb Natapov8bf00a52011-10-05 14:01:22 +02002543}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val, bool entry_only)
{
	int i, j = 0;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	i = find_msr(&m->guest, msr);
	if (!entry_only)
		j = find_msr(&m->host, msr);

	if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
	    (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	}
	if (i < 0) {
		i = m->guest.nr++;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	if (entry_only)
		return;

	if (j < 0) {
		j = m->host.nr++;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}
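
/*
 * Usage sketch (hypothetical MSR and values, for illustration only): a
 * caller that wants MSR_IA32_XYZ to hold guest_val while the guest runs
 * and host_val after every VM exit would do
 *
 *	add_atomic_switch_msr(vmx, MSR_IA32_XYZ, guest_val, host_val, false);
 *
 * whereas entry_only == true populates just the VM-entry load list, for
 * callers that re-establish the host value by other means after the exit.
 */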

static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;

	if (!enable_ept) {
		/*
		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
		 * host CPUID is more efficient than testing guest CPUID
		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
		 */
		if (boot_cpu_has(X86_FEATURE_SMEP))
			guest_efer |= EFER_NX;
		else if (!(guest_efer & EFER_NX))
			ignore_bits |= EFER_NX;
	}

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	clear_atomic_switch_msr(vmx, MSR_EFER);

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer, false);
		return false;
	} else {
		guest_efer &= ~ignore_bits;
		guest_efer |= host_efer & ignore_bits;

		vmx->guest_msrs[efer_offset].data = guest_efer;
		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

		return true;
	}
}
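
/*
 * Worked example for the mask set in the "else" branch above (a sketch;
 * kvm_set_shared_msr() in x86.c combines the values roughly as
 * (guest & mask) | (host & ~mask)): with ignore_bits == EFER_SCE the
 * mask is ~EFER_SCE, so every EFER bit is taken from the guest image
 * except SCE, which silently keeps the host's setting -- harmless,
 * since SCE is meaningless in the modes where it is ignored.
 */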

#ifdef CONFIG_X86_32
/*
 * On 32-bit kernels, VM exits still load the FS and GS bases from the
 * VMCS rather than the segment table.  KVM uses this helper to figure
 * out the current bases to poke them into the VMCS before entry.
 */
static unsigned long segment_base(u16 selector)
{
	struct desc_struct *table;
	unsigned long v;

	if (!(selector & ~SEGMENT_RPL_MASK))
		return 0;

	table = get_current_gdt_ro();

	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
			return 0;

		table = (struct desc_struct *)segment_base(ldt_selector);
	}
	v = get_desc_base(&table[selector >> 3]);
	return v;
}
#endif
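
/*
 * Selector layout refresher for the masking above (architectural): bits
 * 1:0 are the RPL, bit 2 is the table indicator (0 = GDT, 1 = LDT) and
 * bits 15:3 index the descriptor table.  E.g. selector 0x000f decodes
 * as index 1, TI = 1, RPL = 3, which is why "selector >> 3" indexes the
 * table and the SEGMENT_TI_MASK test selects GDT vs. LDT.
 */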

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
#ifdef CONFIG_X86_64
	int cpu = raw_smp_processor_id();
#endif
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;

#ifdef CONFIG_X86_64
	save_fsgs_for_kvm();
	vmx->host_state.fs_sel = current->thread.fsindex;
	vmx->host_state.gs_sel = current->thread.gsindex;
#else
	savesegment(fs, vmx->host_state.fs_sel);
	savesegment(gs, vmx->host_state.gs_sel);
#endif
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);

	vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
	vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));

	vmx->msr_host_kernel_gs_base = current->thread.gsbase;
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif
	if (boot_cpu_has(X86_FEATURE_MPX))
		rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
		loadsegment(ds, vmx->host_state.ds_sel);
		loadsegment(es, vmx->host_state.es_sel);
	}
#endif
	invalidate_tss_limit();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	if (vmx->host_state.msr_host_bndcfgs)
		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	load_fixmap_gdt(raw_smp_processor_id());
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}

static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	/*
	 * In case of hot-plug or hot-unplug, we may have to undo
	 * vmx_vcpu_pi_put even if there is no assigned device.  And we
	 * always keep PI.NDST up to date for simplicity: it makes the
	 * code easier, and CPU migration is not a fast path.
	 */
	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
		return;

	/*
	 * First handle the simple case where no cmpxchg is necessary; just
	 * allow posting non-urgent interrupts.
	 *
	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
	 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
	 * expects the VCPU to be on the blocked_vcpu_list that matches
	 * PI.NDST.
	 */
	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
	    vcpu->cpu == cpu) {
		pi_clear_sn(pi_desc);
		return;
	}

	/* The full case. */
	do {
		old.control = new.control = pi_desc->control;

		dest = cpu_physical_id(cpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		new.sn = 0;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);
}
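
/*
 * The loop above is the usual lock-free read-modify-write shape; in the
 * abstract (illustrative only, not KVM code):
 *
 *	do {
 *		old = *ptr;
 *		new = compute(old);
 *	} while (cmpxchg64(ptr, old, new) != old);
 *
 * For the PI descriptor this publishes the new notification destination
 * and clears SN in one atomic step, so a device posting an interrupt
 * concurrently sees either the old state or the complete new one.
 */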

static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;

	if (!already_loaded) {
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();
		crash_disable_local_vmclear(cpu);

		/*
		 * Read loaded_vmcs->cpu should be before fetching
		 * loaded_vmcs->loaded_vmcss_on_cpu_link.
		 * See the comments in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
	}

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
		indirect_branch_prediction_barrier();
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.  See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		/*
		 * VM exits change the host TR limit to 0x67 after a VM
		 * exit.  This is okay, since 0x67 covers everything except
		 * the IO bitmap and we have code to handle the IO bitmap
		 * being lost after a VM exit.
		 */
		BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}

	/* Set up the TSC multiplier */
	if (kvm_has_tsc_control &&
	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
		decache_tsc_multiplier(vmx);

	vmx_vcpu_pi_load(vcpu, cpu);
	vmx->host_pkru = read_pkru();
	vmx->host_debugctlmsr = get_debugctlmsr();
}

static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(vcpu))
		return;

	/* Set SN when the vCPU is preempted */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	vmx_vcpu_pi_put(vcpu);

	__vmx_load_host_state(to_vmx(vcpu));
}

static bool emulation_required(struct kvm_vcpu *vcpu)
{
	return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
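
/*
 * Worked example for the helpers above: if L1 sets cr0_guest_host_mask
 * to X86_CR0_PE (i.e. L1 owns only the PE bit), then L2 observes
 *
 *	(guest_cr0 & ~X86_CR0_PE) | (cr0_read_shadow & X86_CR0_PE)
 *
 * -- every bit read from the live guest_cr0 except PE, which is
 * supplied by L1's read shadow.
 */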

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	unsigned long old_rflags = vmx_get_rflags(vcpu);

	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);

	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if (interruptibility != interruptibility_old)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}

static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
					       unsigned long exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (vcpu->arch.exception.has_error_code) {
		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (kvm_exception_is_soft(nr))
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
	    vmx_get_nmi_mask(vcpu))
		intr_info |= INTR_INFO_UNBLOCK_NMI;

	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}

/*
 * KVM wants to re-inject into the guest any page faults it received on the
 * guest's behalf. This function checks whether, in a nested guest, they need
 * to be injected into L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		/*
		 * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
		 * The fix is to add the ancillary datum (CR2 or DR6) to structs
		 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
		 * can be written only when inject_pending_event runs.  This should be
		 * conditional on a new capability---if the capability is disabled,
		 * kvm_multiple_exception would write the ancillary information to
		 * CR2 or DR6, for backwards ABI-compatibility.
		 */
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = vcpu->arch.cr2;
			return 1;
		}
	} else {
		if (vmcs12->exception_bitmap & (1u << nr)) {
			if (nr == DB_VECTOR)
				*exit_qual = vcpu->arch.dr6;
			else
				*exit_qual = 0;
			return 1;
		}
	}

	return 0;
}

static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
	 * explicitly skip the instruction because if the HLT state is set,
	 * then the instruction is already executing and RIP has already been
	 * advanced.
	 */
	if (kvm_hlt_in_guest(vcpu->kvm) &&
	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	WARN_ON_ONCE(vmx->emulation_required);

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);

	vmx_clear_hlt(vcpu);
}
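
/*
 * For reference, the interruption-information word written above is laid
 * out per the SDM: bits 7:0 vector, bits 10:8 type, bit 11 deliver error
 * code, bit 31 valid.  A #PF (vector 14) with an error code is therefore
 * encoded as
 *
 *	14 | INTR_TYPE_HARD_EXCEPTION | INTR_INFO_DELIVER_CODE_MASK |
 *		INTR_INFO_VALID_MASK
 */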

static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}

static bool vmx_invpcid_supported(void)
{
	return cpu_has_vmx_invpcid() && enable_ept;
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;

	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(&vmx->vcpu);
}

static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (is_guest_mode(vcpu) &&
	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;

	return vcpu->arch.tsc_offset;
}

/*
 * writes 'offset' into guest's timestamp counter offset register
 */
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We're here if L1 chose not to trap WRMSR to TSC. According
		 * to the spec, this should set L1's TSC; the offset that L1
		 * set for L2 remains unchanged, and still needs to be added
		 * to the newly set TSC to get L2's TSC.
		 */
		struct vmcs12 *vmcs12;
		/* recalculate vmcs02.TSC_OFFSET: */
		vmcs12 = get_vmcs12(vcpu);
		vmcs_write64(TSC_OFFSET, offset +
			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
			 vmcs12->tsc_offset : 0));
	} else {
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   vmcs_read64(TSC_OFFSET), offset);
		vmcs_write64(TSC_OFFSET, offset);
	}
}
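
/*
 * Worked example of the arithmetic above (arbitrary numbers): if L0 gave
 * L1 a TSC offset of 1000 and L1 programmed vmcs12->tsc_offset = 100 for
 * L2, the hardware offset while L2 runs is 1000 + 100 = 1100.
 * vmx_read_l1_tsc_offset() subtracts the 100 again to report L1's 1000,
 * and an L1 WRMSR that lands here replaces only the 1000 while re-adding
 * the 100 that L1 chose for L2.
 */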

/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest by disabling its VMX cpuid bit.
 */
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
}

/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
{
	if (!nested) {
		memset(msrs, 0, sizeof(*msrs));
		return;
	}

	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs02, will turn these bits off - and
	 * nested_vmx_exit_reflected() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
	      msrs->pinbased_ctls_low,
	      msrs->pinbased_ctls_high);
	msrs->pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
	      msrs->exit_ctls_low,
	      msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	if (kvm_mpx_supported())
		msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      msrs->entry_ctls_low,
	      msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
	if (kvm_mpx_supported())
		msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      msrs->procbased_ctls_low,
	      msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING |
		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/*
	 * secondary cpu-based controls.  Do not include those that
	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
	 */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
	      msrs->secondary_ctls_low,
	      msrs->secondary_ctls_high);
	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_WBINVD_EXITING;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
			 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
		if (cpu_has_vmx_ept_execute_only())
			msrs->ept_caps |=
				VMX_EPT_EXECUTE_ONLY_BIT;
		msrs->ept_caps &= vmx_capability.ept;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}

	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context.  The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
	      msrs->misc_low,
	      msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	msrs->misc_high = 0;

	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;

	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
	msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}
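
/*
 * Worked example tying the helpers above together (illustrative values):
 * packing low = 0x16 (must-be-1 bits) and high = 0xff (may-be-1 bits)
 * gives vmx_control_msr(0x16, 0xff) == 0x000000ff00000016.  A control
 * value of 0x1e passes vmx_control_verify(0x1e, 0x16, 0xff) because
 * ((0x1e & 0xff) | 0x16) == 0x1e, while 0x100 fails: bit 8 is not
 * allowed by the high half.  Similarly is_bitwise_subset(0xff, 0x1e,
 * -1ULL) holds, since 0x1e sets no bit outside 0xff.
 */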

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	/*
	 * If L1 has read-only VM-exit information fields, use the
	 * less permissive vmx_vmwrite_bitmap to specify write
	 * permissions for the shadow VMCS.
	 */
	if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits which "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to SDM Vol. 3, Appendix A.2, "Reserved
		 * Controls and Default Settings".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}
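
/*
 * Illustrative only (not part of this driver): a minimal userspace
 * sequence that restores one of these MSRs goes through KVM_SET_MSRS on
 * the vCPU fd, and must run before the guest executes VMXON or the
 * restore is rejected (the vmxon check above returns -EBUSY):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entry;
 *	} msrs = {
 *		.hdr.nmsrs   = 1,
 *		.entry.index = MSR_IA32_VMX_MISC,
 *		.entry.data  = value,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_MSRS, &msrs);
 *
 * where 'value' must be a bitwise subset of what KVM reported via
 * KVM_GET_MSRS, per the restore helpers above.
 */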

/* Returns 0 on success, non-0 otherwise. */
static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}
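
/*
 * Note that the non-TRUE control MSRs are synthesized above from the TRUE
 * variants by OR-ing in the default1 classes of controls
 * (*_ALWAYSON_WITHOUT_TRUE_MSR), which the non-TRUE MSRs must report as
 * must-be-1 (SDM Vol. 3, Appendix A.2).
 */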

static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
						 uint64_t val)
{
	uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;

	return !(val & ~valid_bits);
}

static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
{
	switch (msr->index) {
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested)
			return 1;
		return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
	default:
		return 1;
	}

	return 0;
}

/*
 * Reads an MSR value (of msr_info->index) into msr_info->data.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		msr_info->data = vmx->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
			return 1;

		msr_info->data = to_vmx(vcpu)->spec_ctrl;
		break;
	case MSR_IA32_ARCH_CAPABILITIES:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
			return 1;
		msr_info->data = to_vmx(vcpu)->arch_capabilities;
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if (!msr_info->host_initiated &&
		    !(vmx->msr_ia32_feature_control &
		      FEATURE_CONTROL_LMCE))
			return 1;
		msr_info->data = vcpu->arch.mcg_ext_ctl;
		break;
	case MSR_IA32_FEATURE_CONTROL:
		msr_info->data = vmx->msr_ia32_feature_control;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
				       &msr_info->data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(vmx, msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}

static void vmx_leave_nested(struct kvm_vcpu *vcpu);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;
	u32 msr_index = msr_info->index;
	u64 data = msr_info->data;

	switch (msr_index) {
	case MSR_EFER:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_BNDCFGS:
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
		    (data & MSR_IA32_BNDCFGS_RSVD))
			return 1;
		vmcs_write64(GUEST_BNDCFGS, data);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
			return 1;

		/* The STIBP bit doesn't fault even if it's not advertised */
		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
			return 1;

		vmx->spec_ctrl = data;

		if (!data)
			break;

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_vmx_merge_msr_bitmap. We should not touch the
		 * vmcs02.msr_bitmap here since it gets completely overwritten
		 * in the merging. We update the vmcs01 here for L1 as well
		 * since it will end up touching the MSR anyway now.
		 */
		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
					      MSR_IA32_SPEC_CTRL,
					      MSR_TYPE_RW);
		break;
	case MSR_IA32_PRED_CMD:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
			return 1;

		if (data & ~PRED_CMD_IBPB)
			return 1;

		if (!data)
			break;

		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);

		/*
		 * For non-nested:
		 * When it's written (to non-zero) for the first time, pass
		 * it through.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_vmx_merge_msr_bitmap. We should not touch the
		 * vmcs02.msr_bitmap here since it gets completely overwritten
		 * in the merging.
		 */
		vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
					      MSR_TYPE_W);
		break;
	case MSR_IA32_ARCH_CAPABILITIES:
		if (!msr_info->host_initiated)
			return 1;
		vmx->arch_capabilities = data;
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
				return 1;
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_TSC_ADJUST:
		ret = kvm_set_msr_common(vcpu, msr_info);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		if ((!msr_info->host_initiated &&
		     !(to_vmx(vcpu)->msr_ia32_feature_control &
		       FEATURE_CONTROL_LMCE)) ||
		    (data & ~MCG_EXT_CTL_LMCE_EN))
			return 1;
		vcpu->arch.mcg_ext_ctl = data;
		break;
	case MSR_IA32_FEATURE_CONTROL:
		if (!vmx_feature_control_msr_valid(vcpu, data) ||
		    (to_vmx(vcpu)->msr_ia32_feature_control &
		     FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
			return 1;
		vmx->msr_ia32_feature_control = data;
		if (msr_info->host_initiated && data == 0)
			vmx_leave_nested(vcpu);
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		if (!msr_info->host_initiated)
			return 1; /* they are read-only */
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_set_vmx_msr(vcpu, msr_index, data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		/*
		 * The only supported bit as of Skylake is bit 8, but
		 * it is not supported on KVM.
		 */
		if (data != 0)
			return 1;
		vcpu->arch.ia32_xss = data;
		if (vcpu->arch.ia32_xss != host_xss)
			add_atomic_switch_msr(vmx, MSR_IA32_XSS,
				vcpu->arch.ia32_xss, host_xss, false);
		else
			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			return 1;
		/* Check reserved bit, higher 32 bits should be zero */
		if ((data >> 32) != 0)
			return 1;
		/* Otherwise falls through */
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			u64 old_msr_data = msr->data;
			msr->data = data;
			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
				preempt_disable();
				ret = kvm_set_shared_msr(msr->index, msr->data,
							 msr->mask);
				preempt_enable();
				if (ret)
					msr->data = old_msr_data;
			}
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_info);
	}

	return ret;
}

static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched w/ TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& tboot_enabled())
			return 1;
		/* launched w/o TXT and VMX only enabled w/ TXT */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& !tboot_enabled()) {
			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
				"activate TXT before enabling KVM\n");
			return 1;
		}
		/* launched w/o TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& !tboot_enabled())
			return 1;
	}

	return 0;
}

static void kvm_cpu_vmxon(u64 addr)
{
	cr4_set_bits(X86_CR4_VMXE);
	intel_pt_handle_vmx(1);

	asm volatile (ASM_VMX_VMXON_RAX
			: : "a"(&addr), "m"(addr)
			: "memory", "cc");
}
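
/*
 * Note that VMXON takes the 64-bit *physical* address of a 4 KiB-aligned
 * VMXON region as a memory operand (hence the "m"(addr) constraint, with
 * rax pointing at it), and the region's first dword must already hold the
 * VMCS revision identifier; the per-cpu vmxarea regions are set up in
 * alloc_kvm_area() below.
 */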

static int hardware_enable(void)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old, test_bits;

	if (cr4_read_shadow() & X86_CR4_VMXE)
		return -EBUSY;

	/*
	 * This can happen if we hot-added a CPU but failed to allocate
	 * the VP assist page for it.
	 */
	if (static_branch_unlikely(&enable_evmcs) &&
	    !hv_get_vp_assist_page(cpu))
		return -EFAULT;

	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
	spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));

	/*
	 * Now we can enable the vmclear operation in kdump
	 * since the loaded_vmcss_on_cpu list on this cpu
	 * has been initialized.
	 *
	 * Though the cpu is not in VMX operation yet, it is
	 * safe to enable vmclear here because the
	 * loaded_vmcss_on_cpu list is still empty.
	 */
	crash_enable_local_vmclear(cpu);

	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);

	test_bits = FEATURE_CONTROL_LOCKED;
	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	if (tboot_enabled())
		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;

	if ((old & test_bits) != test_bits) {
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
	}
	kvm_cpu_vmxon(phys_addr);
	if (enable_ept)
		ept_sync_global();

	return 0;
}

static void vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link)
		__loaded_vmcs_clear(v);
}

/*
 * Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");

	intel_pt_handle_vmx(0);
	cr4_clear_bits(X86_CR4_VMXE);
}

static void hardware_disable(void)
{
	vmclear_local_loaded_vmcss();
	kvm_cpu_vmxoff();
}

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}
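
/*
 * Worked example (MSR values are illustrative): if rdmsr on
 * MSR_IA32_VMX_PROCBASED_CTLS returned low = 0x0401e172 and
 * high = 0xfff9fffe, then bit 7 (HLT exiting) may be set because it is 1
 * in the high word, while bit 1 is forced to 1 because it is 1 in the low
 * word.  Any bit of ctl_min that does not survive
 * ((min | opt) & high) | low makes the whole setup fail with -EIO.
 */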

static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);
	return vmx_msr_high & ctl;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_UNCOND_IO_EXITING |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_MWAIT_EXITING |
	      CPU_BASED_MONITOR_EXITING |
	      CPU_BASED_INVLPG_EXITING |
	      CPU_BASED_RDPMC_EXITING;

	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
			SECONDARY_EXEC_DESC |
			SECONDARY_EXEC_RDTSCP |
			SECONDARY_EXEC_ENABLE_INVPCID |
			SECONDARY_EXEC_APIC_REGISTER_VIRT |
			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
			SECONDARY_EXEC_SHADOW_VMCS |
			SECONDARY_EXEC_XSAVES |
			SECONDARY_EXEC_RDSEED_EXITING |
			SECONDARY_EXEC_RDRAND_EXITING |
			SECONDARY_EXEC_ENABLE_PML |
			SECONDARY_EXEC_TSC_SCALING |
			SECONDARY_EXEC_ENABLE_VMFUNC;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
	      SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif

	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_2nd_exec_control &= ~(
				SECONDARY_EXEC_APIC_REGISTER_VIRT |
				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);

	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
		&vmx_capability.ept, &vmx_capability.vpid);

	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT is enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
	} else if (vmx_capability.ept) {
		vmx_capability.ept = 0;
		pr_warn_once("EPT CAP should not exist if the 1-setting of "
			     "the enable-EPT VM-execution control is unsupported\n");
	}
	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
	    vmx_capability.vpid) {
		vmx_capability.vpid = 0;
		pr_warn_once("VPID CAP should not exist if the 1-setting of "
			     "the enable-VPID VM-execution control is unsupported\n");
	}

	min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
		VM_EXIT_CLEAR_BNDCFGS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
		 PIN_BASED_VMX_PREEMPTION_TIMER;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	if (cpu_has_broken_vmx_preemption_timer())
		_pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
	if (!(_cpu_based_2nd_exec_control &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;

	min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
	opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_conf->size);
	vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;

	/* KVM supports Enlightened VMCS v1 only */
	if (static_branch_unlikely(&enable_evmcs))
		vmcs_conf->revision_id = KVM_EVMCS_VERSION;
	else
		vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	if (static_branch_unlikely(&enable_evmcs))
		evmcs_sanitize_exec_ctrls(vmcs_conf);

	cpu_has_load_ia32_efer =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_EFER)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_EFER);

	cpu_has_load_perf_global_ctrl =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);

	/*
	 * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
	 * but due to the errata below it can't be used. The workaround is
	 * to use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
	 *
	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
	 *
	 * AAK155 (model 26)
	 * AAP115 (model 30)
	 * AAT100 (model 37)
	 * BC86,AAY89,BD102 (model 44)
	 * BA97 (model 46)
	 */
	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
		switch (boot_cpu_data.x86_model) {
		case 26:
		case 30:
		case 37:
		case 44:
		case 46:
			cpu_has_load_perf_global_ctrl = false;
			printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
					"does not work properly. Using workaround\n");
			break;
		default:
			break;
		}
	}

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		rdmsrl(MSR_IA32_XSS, host_xss);

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}
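
/*
 * The revision identifier written above must match what the CPU reports
 * in bits 30:0 of MSR_IA32_VMX_BASIC (or KVM_EVMCS_VERSION when running
 * on the Hyper-V enlightened VMCS); VMPTRLD of a region carrying a stale
 * identifier fails.
 */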

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
	if (loaded_vmcs->msr_bitmap)
		free_page((unsigned long)loaded_vmcs->msr_bitmap);
	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	loaded_vmcs->vmcs = alloc_vmcs();
	if (!loaded_vmcs->vmcs)
		return -ENOMEM;

	loaded_vmcs->shadow_vmcs = NULL;
	loaded_vmcs_init(loaded_vmcs);

	if (cpu_has_vmx_msr_bitmap()) {
		loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
		if (!loaded_vmcs->msr_bitmap)
			goto out_vmcs;
		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);

		if (IS_ENABLED(CONFIG_HYPERV) &&
		    static_branch_unlikely(&enable_evmcs) &&
		    (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
			struct hv_enlightened_vmcs *evmcs =
				(struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;

			evmcs->hv_enlightenments_control.msr_bitmap = 1;
		}
	}
	return 0;

out_vmcs:
	free_loaded_vmcs(loaded_vmcs);
	return -ENOMEM;
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

enum vmcs_field_width {
	VMCS_FIELD_WIDTH_U16 = 0,
	VMCS_FIELD_WIDTH_U64 = 1,
	VMCS_FIELD_WIDTH_U32 = 2,
	VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};

static inline int vmcs_field_width(unsigned long field)
{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_WIDTH_U32;
	return (field >> 13) & 0x3;
}

static inline int vmcs_field_readonly(unsigned long field)
{
	return (((field >> 10) & 0x3) == 1);
}
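
/*
 * These helpers decode the architectural VMCS field encoding: bits 14:13
 * give the field width, bits 11:10 the field type (1 == read-only data),
 * and bit 0 selects the high half of a 64-bit field.  For example, field
 * 0x2400 (GUEST_PHYSICAL_ADDRESS) decodes as (0x2400 >> 13) & 0x3 == 1,
 * i.e. VMCS_FIELD_WIDTH_U64, and ((0x2400 >> 10) & 0x3) == 1, i.e.
 * read-only.
 */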

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		u16 field = shadow_read_only_fields[i];
		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_only_fields[j] = field;
		j++;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		u16 field = shadow_read_write_fields[i];
		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_write_fields[j] = field;
		j++;
	}
	max_shadow_read_write_fields = j;
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
			  struct kvm_segment *save)
{
	if (!emulate_invalid_guest_state) {
		/*
		 * CS and SS RPL should be equal during guest entry according
		 * to VMX spec, but in reality it is not always so. Since vcpu
		 * is in the middle of the transition from real mode to
		 * protected mode it is safe to assume that RPL 0 is a good
		 * default value.
		 */
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
			save->selector &= ~SEGMENT_RPL_MASK;
		save->dpl = save->selector & SEGMENT_RPL_MASK;
		save->s = 1;
	}
	vmx_set_segment(vcpu, save, seg);
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Update the real mode segment cache. It may be stale if a segment
	 * register was written while the vcpu was in guest mode.
	 */
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);

	vmx->rmode.vm86_active = 0;

	vmx_segment_cache_clear(vmx);

	vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
	fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
	fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
	fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
}

static void fix_rmode_seg(int seg, struct kvm_segment *save)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	struct kvm_segment var = *save;

	var.dpl = 0x3;
	if (seg == VCPU_SREG_CS)
		var.type = 0x3;

	if (!emulate_invalid_guest_state) {
		var.selector = var.base >> 4;
		var.base = var.base & 0xffff0;
		var.limit = 0xffff;
		var.g = 0;
		var.db = 0;
		var.present = 1;
		var.s = 1;
		var.l = 0;
		var.unusable = 0;
		var.type = 0x3;
		var.avl = 0;
		if (save->base & 0xf)
			printk_once(KERN_WARNING "kvm: segment base is not "
					"paragraph aligned when entering "
					"protected mode (seg=%d)", seg);
	}

	vmcs_write16(sf->selector, var.selector);
	vmcs_writel(sf->base, var.base);
	vmcs_write32(sf->limit, var.limit);
	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
}
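
/*
 * The !emulate_invalid_guest_state branch above rebuilds a vm86-style
 * segment from the saved base using real-mode rules: selector = base >> 4
 * and base &= 0xffff0, so e.g. a base of 0xb8000 yields selector 0xb800.
 * That only round-trips when the base is paragraph (16-byte) aligned,
 * hence the warning.
 */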

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);

	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
	vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);

	vmx->rmode.vm86_active = 1;

	/*
	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
	 * the vcpu. Warn the user that an update is overdue.
	 */
	if (!kvm_vmx->tss_addr)
		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
			     "called before entering vcpu\n");

	vmx_segment_cache_clear(vmx);

	vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_rflags = flags;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);

	kvm_mmu_reset_context(vcpu);
}

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	if (!msr)
		return;

	/*
	 * Force kernel_gs_base reloading before EFER changes, as control
	 * of this msr depends on is_long_mode().
	 */
	vmx_load_host_state(to_vmx(vcpu));
	vcpu->arch.efer = efer;
	if (efer & EFER_LMA) {
		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	vmx_segment_cache_clear(to_vmx(vcpu));

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
		pr_debug_ratelimited("%s: tss fixup for long mode.\n",
				     __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
			     | VMX_AR_TYPE_BUSY_64_TSS);
	}
	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}

#endif
4888
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08004889static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
4890 bool invalidate_gpa)
Sheng Yang2384d2b2008-01-17 15:14:33 +08004891{
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08004892 if (enable_ept && (invalidate_gpa || !enable_vpid)) {
Xiao Guangrongdd180b32010-07-03 16:02:42 +08004893 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
4894 return;
Peter Feiner995f00a2017-06-30 17:26:32 -07004895 ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
Jim Mattsonf0b98c02017-03-15 07:56:11 -07004896 } else {
4897 vpid_sync_context(vpid);
Xiao Guangrongdd180b32010-07-03 16:02:42 +08004898 }
Sheng Yang2384d2b2008-01-17 15:14:33 +08004899}
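
/*
 * Rough decision table for the flush above:
 *
 *	enable_ept && (invalidate_gpa || !enable_vpid)
 *		-> ept_sync_context(): invalidates guest-physical and
 *		   combined mappings derived from this vcpu's EPT root;
 *	otherwise
 *		-> vpid_sync_context(): invalidates only linear mappings
 *		   tagged with this vcpu's VPID.
 */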
4900
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08004901static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
Wanpeng Lidd5f5342015-09-23 18:26:57 +08004902{
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08004903 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
Wanpeng Lidd5f5342015-09-23 18:26:57 +08004904}
4905
Avi Kivitye8467fd2009-12-29 18:43:06 +02004906static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
4907{
4908 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
4909
4910 vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
4911 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
4912}
4913
Avi Kivityaff48ba2010-12-05 18:56:11 +02004914static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
4915{
Sean Christophersonb4d18512018-03-05 12:04:40 -08004916 if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
Avi Kivityaff48ba2010-12-05 18:56:11 +02004917 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4918 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
4919}
4920
Anthony Liguori25c4c272007-04-27 09:29:21 +03004921static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08004922{
Avi Kivityfc78f512009-12-07 12:16:48 +02004923 ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
4924
4925 vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
4926 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
Avi Kivity399badf2007-01-05 16:36:38 -08004927}
4928
Sheng Yang14394422008-04-28 12:24:45 +08004929static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
4930{
Gleb Natapovd0d538b2013-10-09 19:13:19 +03004931 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4932
Avi Kivity6de4f3a2009-05-31 22:58:47 +03004933 if (!test_bit(VCPU_EXREG_PDPTR,
4934 (unsigned long *)&vcpu->arch.regs_dirty))
4935 return;
4936
Sheng Yang14394422008-04-28 12:24:45 +08004937 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
Gleb Natapovd0d538b2013-10-09 19:13:19 +03004938 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
4939 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
4940 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
4941 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
Sheng Yang14394422008-04-28 12:24:45 +08004942 }
4943}
4944
Avi Kivity8f5d5492009-05-31 18:41:29 +03004945static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
4946{
Gleb Natapovd0d538b2013-10-09 19:13:19 +03004947 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4948
Avi Kivity8f5d5492009-05-31 18:41:29 +03004949 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
Gleb Natapovd0d538b2013-10-09 19:13:19 +03004950 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
4951 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
4952 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
4953 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
Avi Kivity8f5d5492009-05-31 18:41:29 +03004954 }
Avi Kivity6de4f3a2009-05-31 22:58:47 +03004955
4956 __set_bit(VCPU_EXREG_PDPTR,
4957 (unsigned long *)&vcpu->arch.regs_avail);
4958 __set_bit(VCPU_EXREG_PDPTR,
4959 (unsigned long *)&vcpu->arch.regs_dirty);
Avi Kivity8f5d5492009-05-31 18:41:29 +03004960}
4961
David Matlack38991522016-11-29 18:14:08 -08004962static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
4963{
Paolo Bonzini6677f3d2018-02-26 13:40:08 +01004964 u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
4965 u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
David Matlack38991522016-11-29 18:14:08 -08004966 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4967
Paolo Bonzini6677f3d2018-02-26 13:40:08 +01004968 if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
David Matlack38991522016-11-29 18:14:08 -08004969 SECONDARY_EXEC_UNRESTRICTED_GUEST &&
4970 nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
4971 fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
4972
4973 return fixed_bits_valid(val, fixed0, fixed1);
4974}
4975
4976static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
4977{
Paolo Bonzini6677f3d2018-02-26 13:40:08 +01004978 u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
4979 u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
David Matlack38991522016-11-29 18:14:08 -08004980
4981 return fixed_bits_valid(val, fixed0, fixed1);
4982}
4983
4984static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
4985{
Paolo Bonzini6677f3d2018-02-26 13:40:08 +01004986 u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
4987 u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
David Matlack38991522016-11-29 18:14:08 -08004988
4989 return fixed_bits_valid(val, fixed0, fixed1);
4990}
4991
4992/* No difference in the restrictions on guest and host CR4 in VMX operation. */
4993#define nested_guest_cr4_valid nested_cr4_valid
4994#define nested_host_cr4_valid nested_cr4_valid
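
/*
 * The fixed0/fixed1 checks above use fixed_bits_valid(), a small helper
 * defined elsewhere in this file; its semantics are roughly:
 *
 *	static bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
 *	{
 *		return ((val & fixed0) == fixed0) &&	(* fixed-to-1 bits set *)
 *		       ((val & ~fixed1) == 0);		(* fixed-to-0 bits clear *)
 *	}
 *
 * Worked example with hypothetical MSR values (the real ones are
 * CPU-dependent): for cr0_fixed0 = 0x80000021 (PG|NE|PE) and
 * cr0_fixed1 = 0xffffffff, val = 0x80000031 is valid, while
 * val = 0x80000011 is not (NE clear).
 */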
4995
Nadav Har'El5e1746d2011-05-25 23:03:24 +03004996static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
Sheng Yang14394422008-04-28 12:24:45 +08004997
4998static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
4999 unsigned long cr0,
5000 struct kvm_vcpu *vcpu)
5001{
Marcelo Tosatti5233dd52011-06-06 14:27:47 -03005002 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
5003 vmx_decache_cr3(vcpu);
Sheng Yang14394422008-04-28 12:24:45 +08005004 if (!(cr0 & X86_CR0_PG)) {
5005 /* From paging/starting to nonpaging */
5006 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
Sheng Yang65267ea2008-06-18 14:43:38 +08005007 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
Sheng Yang14394422008-04-28 12:24:45 +08005008 (CPU_BASED_CR3_LOAD_EXITING |
5009 CPU_BASED_CR3_STORE_EXITING));
5010 vcpu->arch.cr0 = cr0;
Avi Kivityfc78f512009-12-07 12:16:48 +02005011 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
Sheng Yang14394422008-04-28 12:24:45 +08005012 } else if (!is_paging(vcpu)) {
5013 /* From nonpaging to paging */
5014 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
Sheng Yang65267ea2008-06-18 14:43:38 +08005015 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
Sheng Yang14394422008-04-28 12:24:45 +08005016 ~(CPU_BASED_CR3_LOAD_EXITING |
5017 CPU_BASED_CR3_STORE_EXITING));
5018 vcpu->arch.cr0 = cr0;
Avi Kivityfc78f512009-12-07 12:16:48 +02005019 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
Sheng Yang14394422008-04-28 12:24:45 +08005020 }
Sheng Yang95eb84a2009-08-19 09:52:18 +08005021
5022 if (!(cr0 & X86_CR0_WP))
5023 *hw_cr0 &= ~X86_CR0_WP;
Sheng Yang14394422008-04-28 12:24:45 +08005024}
5025
Avi Kivity6aa8b732006-12-10 02:21:36 -08005026static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
5027{
Avi Kivity7ffd92c2009-06-09 14:10:45 +03005028 struct vcpu_vmx *vmx = to_vmx(vcpu);
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005029 unsigned long hw_cr0;
5030
Gleb Natapov50378782013-02-04 16:00:28 +02005031 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005032 if (enable_unrestricted_guest)
Gleb Natapov50378782013-02-04 16:00:28 +02005033 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
Gleb Natapov218e7632013-01-21 15:36:45 +02005034 else {
Gleb Natapov50378782013-02-04 16:00:28 +02005035 hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
Sheng Yang14394422008-04-28 12:24:45 +08005036
Gleb Natapov218e7632013-01-21 15:36:45 +02005037 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
5038 enter_pmode(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005039
Gleb Natapov218e7632013-01-21 15:36:45 +02005040 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
5041 enter_rmode(vcpu);
5042 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08005043
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005044#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02005045 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10005046 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
Avi Kivity6aa8b732006-12-10 02:21:36 -08005047 enter_lmode(vcpu);
Rusty Russell707d92fa2007-07-17 23:19:08 +10005048 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
Avi Kivity6aa8b732006-12-10 02:21:36 -08005049 exit_lmode(vcpu);
5050 }
5051#endif
5052
Sean Christophersonb4d18512018-03-05 12:04:40 -08005053 if (enable_ept && !enable_unrestricted_guest)
Sheng Yang14394422008-04-28 12:24:45 +08005054 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
5055
Avi Kivity6aa8b732006-12-10 02:21:36 -08005056 vmcs_writel(CR0_READ_SHADOW, cr0);
Sheng Yang14394422008-04-28 12:24:45 +08005057 vmcs_writel(GUEST_CR0, hw_cr0);
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005058 vcpu->arch.cr0 = cr0;
Gleb Natapov14168782013-01-21 15:36:49 +02005059
5060 /* depends on vcpu->arch.cr0 to be set to a new value */
5061 vmx->emulation_required = emulation_required(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005062}
5063
Yu Zhang855feb62017-08-24 20:27:55 +08005064static int get_ept_level(struct kvm_vcpu *vcpu)
5065{
5066 if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
5067 return 5;
5068 return 4;
5069}
5070
Peter Feiner995f00a2017-06-30 17:26:32 -07005071static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
Sheng Yang14394422008-04-28 12:24:45 +08005072{
Yu Zhang855feb62017-08-24 20:27:55 +08005073 u64 eptp = VMX_EPTP_MT_WB;
Sheng Yang14394422008-04-28 12:24:45 +08005074
Yu Zhang855feb62017-08-24 20:27:55 +08005075 eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
Sheng Yang14394422008-04-28 12:24:45 +08005076
Peter Feiner995f00a2017-06-30 17:26:32 -07005077 if (enable_ept_ad_bits &&
5078 (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
David Hildenbrandbb97a012017-08-10 23:15:28 +02005079 eptp |= VMX_EPTP_AD_ENABLE_BIT;
Sheng Yang14394422008-04-28 12:24:45 +08005080 eptp |= (root_hpa & PAGE_MASK);
5081
5082 return eptp;
5083}
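
/*
 * Worked example: for a 4-level walk with A/D bits enabled and a root
 * at 0x12345000, the EPTP built above is
 *
 *	0x12345000			root_hpa, bits 63:12
 *	| VMX_EPTP_MT_WB		memory type WB, bits 2:0 = 6
 *	| VMX_EPTP_PWL_4		walk length - 1, bits 5:3 = 3
 *	| VMX_EPTP_AD_ENABLE_BIT	bit 6
 *	= 0x1234505e
 */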
5084
Avi Kivity6aa8b732006-12-10 02:21:36 -08005085static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
5086{
Sheng Yang14394422008-04-28 12:24:45 +08005087 unsigned long guest_cr3;
5088 u64 eptp;
5089
5090 guest_cr3 = cr3;
Avi Kivity089d0342009-03-23 18:26:32 +02005091 if (enable_ept) {
Peter Feiner995f00a2017-06-30 17:26:32 -07005092 eptp = construct_eptp(vcpu, cr3);
Sheng Yang14394422008-04-28 12:24:45 +08005093 vmcs_write64(EPT_POINTER, eptp);
Sean Christophersone90008d2018-03-05 12:04:37 -08005094 if (enable_unrestricted_guest || is_paging(vcpu) ||
5095 is_guest_mode(vcpu))
Jan Kiszka59ab5a82013-08-08 16:26:29 +02005096 guest_cr3 = kvm_read_cr3(vcpu);
5097 else
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005098 guest_cr3 = to_kvm_vmx(vcpu->kvm)->ept_identity_map_addr;
Marcelo Tosatti7c93be442009-10-26 16:48:33 -02005099 ept_load_pdptrs(vcpu);
Sheng Yang14394422008-04-28 12:24:45 +08005100 }
5101
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08005102 vmx_flush_tlb(vcpu, true);
Sheng Yang14394422008-04-28 12:24:45 +08005103 vmcs_writel(GUEST_CR3, guest_cr3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005104}
5105
Nadav Har'El5e1746d2011-05-25 23:03:24 +03005106static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005107{
Ben Serebrin085e68e2015-04-16 11:58:05 -07005108 /*
5109 * Pass through host's Machine Check Enable value to hw_cr4, which
5110 * is in force while we are in guest mode. Do not let guests control
5111 * this bit, even if host CR4.MCE == 0.
5112 */
Sean Christopherson5dc1f042018-03-05 12:04:39 -08005113 unsigned long hw_cr4;
5114
5115 hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
5116 if (enable_unrestricted_guest)
5117 hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
5118 else if (to_vmx(vcpu)->rmode.vm86_active)
5119 hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
5120 else
5121 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
Sheng Yang14394422008-04-28 12:24:45 +08005122
Sean Christopherson64f7a112018-04-30 10:01:06 -07005123 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
5124 if (cr4 & X86_CR4_UMIP) {
5125 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
Paolo Bonzini0367f202016-07-12 10:44:55 +02005126 SECONDARY_EXEC_DESC);
Sean Christopherson64f7a112018-04-30 10:01:06 -07005127 hw_cr4 &= ~X86_CR4_UMIP;
5128 } else if (!is_guest_mode(vcpu) ||
5129 !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
5130 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
5131 SECONDARY_EXEC_DESC);
5132 }
Paolo Bonzini0367f202016-07-12 10:44:55 +02005133
Nadav Har'El5e1746d2011-05-25 23:03:24 +03005134 if (cr4 & X86_CR4_VMXE) {
5135 /*
5136 * To use VMXON (and later other VMX instructions), a guest
5137 * must first be able to turn on cr4.VMXE (see handle_vmon()).
5138		 * So the check on whether to allow nested VMX
5139		 * is done here.
5140 */
5141 if (!nested_vmx_allowed(vcpu))
5142 return 1;
Jan Kiszka1a0d74e2013-03-07 14:08:07 +01005143 }
David Matlack38991522016-11-29 18:14:08 -08005144
5145 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
Nadav Har'El5e1746d2011-05-25 23:03:24 +03005146 return 1;
5147
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005148 vcpu->arch.cr4 = cr4;
Sheng Yang14394422008-04-28 12:24:45 +08005149
Sean Christopherson5dc1f042018-03-05 12:04:39 -08005150 if (!enable_unrestricted_guest) {
5151 if (enable_ept) {
5152 if (!is_paging(vcpu)) {
5153 hw_cr4 &= ~X86_CR4_PAE;
5154 hw_cr4 |= X86_CR4_PSE;
5155 } else if (!(cr4 & X86_CR4_PAE)) {
5156 hw_cr4 &= ~X86_CR4_PAE;
5157 }
5158 }
5159
Radim Krčmář656ec4a2015-11-02 22:20:00 +01005160 /*
Huaitong Handdba2622016-03-22 16:51:15 +08005161		 * SMEP/SMAP/PKU are disabled if the CPU is in non-paging mode
5162		 * in hardware. To emulate this behavior, SMEP/SMAP/PKU need
5163		 * to be manually disabled when the guest switches to non-paging
5164		 * mode.
5165 *
5166 * If !enable_unrestricted_guest, the CPU is always running
5167 * with CR0.PG=1 and CR4 needs to be modified.
5168 * If enable_unrestricted_guest, the CPU automatically
5169 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
Radim Krčmář656ec4a2015-11-02 22:20:00 +01005170 */
Sean Christopherson5dc1f042018-03-05 12:04:39 -08005171 if (!is_paging(vcpu))
5172 hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
5173 }
Radim Krčmář656ec4a2015-11-02 22:20:00 +01005174
Sheng Yang14394422008-04-28 12:24:45 +08005175 vmcs_writel(CR4_READ_SHADOW, cr4);
5176 vmcs_writel(GUEST_CR4, hw_cr4);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03005177 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005178}
5179
Avi Kivity6aa8b732006-12-10 02:21:36 -08005180static void vmx_get_segment(struct kvm_vcpu *vcpu,
5181 struct kvm_segment *var, int seg)
5182{
Avi Kivitya9179492011-01-03 14:28:52 +02005183 struct vcpu_vmx *vmx = to_vmx(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005184 u32 ar;
5185
Gleb Natapovc6ad11532012-12-12 19:10:51 +02005186 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
Avi Kivityf5f7b2f2012-08-21 17:07:00 +03005187 *var = vmx->rmode.segs[seg];
Avi Kivitya9179492011-01-03 14:28:52 +02005188 if (seg == VCPU_SREG_TR
Avi Kivity2fb92db2011-04-27 19:42:18 +03005189 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
Avi Kivityf5f7b2f2012-08-21 17:07:00 +03005190 return;
Avi Kivity1390a282012-08-21 17:07:08 +03005191 var->base = vmx_read_guest_seg_base(vmx, seg);
5192 var->selector = vmx_read_guest_seg_selector(vmx, seg);
5193 return;
Avi Kivitya9179492011-01-03 14:28:52 +02005194 }
Avi Kivity2fb92db2011-04-27 19:42:18 +03005195 var->base = vmx_read_guest_seg_base(vmx, seg);
5196 var->limit = vmx_read_guest_seg_limit(vmx, seg);
5197 var->selector = vmx_read_guest_seg_selector(vmx, seg);
5198 ar = vmx_read_guest_seg_ar(vmx, seg);
Gleb Natapov03617c12013-06-28 13:17:18 +03005199 var->unusable = (ar >> 16) & 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005200 var->type = ar & 15;
5201 var->s = (ar >> 4) & 1;
5202 var->dpl = (ar >> 5) & 3;
Gleb Natapov03617c12013-06-28 13:17:18 +03005203 /*
5204	 * Some userspaces do not preserve the unusable property. Since a
5205	 * usable segment has to be present according to the VMX spec, we can
5206	 * use the present property to work around this userspace bug by making
5207	 * an unusable segment always nonpresent. vmx_segment_access_rights()
5208	 * already marks a nonpresent segment as unusable.
5209 */
5210 var->present = !var->unusable;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005211 var->avl = (ar >> 12) & 1;
5212 var->l = (ar >> 13) & 1;
5213 var->db = (ar >> 14) & 1;
5214 var->g = (ar >> 15) & 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005215}
5216
Avi Kivitya9179492011-01-03 14:28:52 +02005217static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
5218{
Avi Kivitya9179492011-01-03 14:28:52 +02005219 struct kvm_segment s;
5220
5221 if (to_vmx(vcpu)->rmode.vm86_active) {
5222 vmx_get_segment(vcpu, &s, seg);
5223 return s.base;
5224 }
Avi Kivity2fb92db2011-04-27 19:42:18 +03005225 return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
Avi Kivitya9179492011-01-03 14:28:52 +02005226}
5227
Marcelo Tosattib09408d2013-01-07 19:27:06 -02005228static int vmx_get_cpl(struct kvm_vcpu *vcpu)
Izik Eidus2e4d2652008-03-24 19:38:34 +02005229{
Marcelo Tosattib09408d2013-01-07 19:27:06 -02005230 struct vcpu_vmx *vmx = to_vmx(vcpu);
5231
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02005232 if (unlikely(vmx->rmode.vm86_active))
Izik Eidus2e4d2652008-03-24 19:38:34 +02005233 return 0;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02005234 else {
5235 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
Andy Lutomirski4d283ec2015-08-13 13:18:48 -07005236 return VMX_AR_DPL(ar);
Avi Kivity69c73022011-03-07 15:26:44 +02005237 }
Avi Kivity69c73022011-03-07 15:26:44 +02005238}
5239
Avi Kivity653e3102007-05-07 10:55:37 +03005240static u32 vmx_segment_access_rights(struct kvm_segment *var)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005241{
Avi Kivity6aa8b732006-12-10 02:21:36 -08005242 u32 ar;
5243
Avi Kivityf0495f92012-06-07 17:06:10 +03005244 if (var->unusable || !var->present)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005245 ar = 1 << 16;
5246 else {
5247 ar = var->type & 15;
5248 ar |= (var->s & 1) << 4;
5249 ar |= (var->dpl & 3) << 5;
5250 ar |= (var->present & 1) << 7;
5251 ar |= (var->avl & 1) << 12;
5252 ar |= (var->l & 1) << 13;
5253 ar |= (var->db & 1) << 14;
5254 ar |= (var->g & 1) << 15;
5255 }
Avi Kivity653e3102007-05-07 10:55:37 +03005256
5257 return ar;
5258}
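
/*
 * Worked example: a flat 64-bit code segment (type = 0xb, s = 1,
 * dpl = 0, present = 1, avl = 0, l = 1, db = 0, g = 1) packs to
 *
 *	0xb | (1 << 4) | (1 << 7) | (1 << 13) | (1 << 15) = 0xa09b
 *
 * matching the AR bytes the CPU reports for such a segment.
 */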
5259
5260static void vmx_set_segment(struct kvm_vcpu *vcpu,
5261 struct kvm_segment *var, int seg)
5262{
Avi Kivity7ffd92c2009-06-09 14:10:45 +03005263 struct vcpu_vmx *vmx = to_vmx(vcpu);
Mathias Krause772e0312012-08-30 01:30:19 +02005264 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
Avi Kivity653e3102007-05-07 10:55:37 +03005265
Avi Kivity2fb92db2011-04-27 19:42:18 +03005266 vmx_segment_cache_clear(vmx);
5267
Gleb Natapov1ecd50a2012-12-12 19:10:54 +02005268 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
5269 vmx->rmode.segs[seg] = *var;
5270 if (seg == VCPU_SREG_TR)
5271 vmcs_write16(sf->selector, var->selector);
5272 else if (var->s)
5273 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
Gleb Natapovd99e4152012-12-20 16:57:45 +02005274 goto out;
Avi Kivity653e3102007-05-07 10:55:37 +03005275 }
Gleb Natapov1ecd50a2012-12-12 19:10:54 +02005276
Avi Kivity653e3102007-05-07 10:55:37 +03005277 vmcs_writel(sf->base, var->base);
5278 vmcs_write32(sf->limit, var->limit);
5279 vmcs_write16(sf->selector, var->selector);
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005280
5281 /*
5282	 * Fix the "Accessed" bit in the AR field of segment registers for
5283	 * older qemu binaries.
5284	 * The IA32 architecture specifies that at processor reset the
5285	 * "Accessed" bit in the AR field of segment registers is 1, but qemu
Guo Chao0fa06072012-06-28 15:16:19 +08005286	 * sets it to 0 in its userland code. This causes an invalid-guest-
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005287	 * state vmexit when "unrestricted guest" mode is turned on.
5288	 * A fix for this setup issue in cpu_reset has been pushed to the qemu
5289	 * tree, so newer qemu binaries with that fix do not need this
5290	 * kvm hack.
5291 */
5292 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
Gleb Natapovf924d662012-12-12 19:10:55 +02005293 var->type |= 0x1; /* Accessed */
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005294
Gleb Natapovf924d662012-12-12 19:10:55 +02005295 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
Gleb Natapovd99e4152012-12-20 16:57:45 +02005296
5297out:
Paolo Bonzini98eb2f82014-03-27 09:51:52 +01005298 vmx->emulation_required = emulation_required(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005299}
5300
Avi Kivity6aa8b732006-12-10 02:21:36 -08005301static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5302{
Avi Kivity2fb92db2011-04-27 19:42:18 +03005303 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005304
5305 *db = (ar >> 14) & 1;
5306 *l = (ar >> 13) & 1;
5307}
5308
Gleb Natapov89a27f42010-02-16 10:51:48 +02005309static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005310{
Gleb Natapov89a27f42010-02-16 10:51:48 +02005311 dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
5312 dt->address = vmcs_readl(GUEST_IDTR_BASE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005313}
5314
Gleb Natapov89a27f42010-02-16 10:51:48 +02005315static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005316{
Gleb Natapov89a27f42010-02-16 10:51:48 +02005317 vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
5318 vmcs_writel(GUEST_IDTR_BASE, dt->address);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005319}
5320
Gleb Natapov89a27f42010-02-16 10:51:48 +02005321static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005322{
Gleb Natapov89a27f42010-02-16 10:51:48 +02005323 dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
5324 dt->address = vmcs_readl(GUEST_GDTR_BASE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005325}
5326
Gleb Natapov89a27f42010-02-16 10:51:48 +02005327static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005328{
Gleb Natapov89a27f42010-02-16 10:51:48 +02005329 vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
5330 vmcs_writel(GUEST_GDTR_BASE, dt->address);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005331}
5332
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005333static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
5334{
5335 struct kvm_segment var;
5336 u32 ar;
5337
5338 vmx_get_segment(vcpu, &var, seg);
Gleb Natapov07f42f52012-12-12 19:10:49 +02005339 var.dpl = 0x3;
Gleb Natapov0647f4a2012-12-12 19:10:50 +02005340 if (seg == VCPU_SREG_CS)
5341 var.type = 0x3;
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005342 ar = vmx_segment_access_rights(&var);
5343
5344 if (var.base != (var.selector << 4))
5345 return false;
Gleb Natapov89efbed2012-12-20 16:57:44 +02005346 if (var.limit != 0xffff)
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005347 return false;
Gleb Natapov07f42f52012-12-12 19:10:49 +02005348 if (ar != 0xf3)
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005349 return false;
5350
5351 return true;
5352}
5353
5354static bool code_segment_valid(struct kvm_vcpu *vcpu)
5355{
5356 struct kvm_segment cs;
5357 unsigned int cs_rpl;
5358
5359 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
Nadav Amitb32a9912015-03-29 16:33:04 +03005360 cs_rpl = cs.selector & SEGMENT_RPL_MASK;
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005361
Avi Kivity1872a3f2009-01-04 23:26:52 +02005362 if (cs.unusable)
5363 return false;
Andy Lutomirski4d283ec2015-08-13 13:18:48 -07005364 if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005365 return false;
5366 if (!cs.s)
5367 return false;
Andy Lutomirski4d283ec2015-08-13 13:18:48 -07005368 if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005369 if (cs.dpl > cs_rpl)
5370 return false;
Avi Kivity1872a3f2009-01-04 23:26:52 +02005371 } else {
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005372 if (cs.dpl != cs_rpl)
5373 return false;
5374 }
5375 if (!cs.present)
5376 return false;
5377
5378 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
5379 return true;
5380}
5381
5382static bool stack_segment_valid(struct kvm_vcpu *vcpu)
5383{
5384 struct kvm_segment ss;
5385 unsigned int ss_rpl;
5386
5387 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
Nadav Amitb32a9912015-03-29 16:33:04 +03005388 ss_rpl = ss.selector & SEGMENT_RPL_MASK;
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005389
Avi Kivity1872a3f2009-01-04 23:26:52 +02005390 if (ss.unusable)
5391 return true;
5392 if (ss.type != 3 && ss.type != 7)
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005393 return false;
5394 if (!ss.s)
5395 return false;
5396 if (ss.dpl != ss_rpl) /* DPL != RPL */
5397 return false;
5398 if (!ss.present)
5399 return false;
5400
5401 return true;
5402}
5403
5404static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
5405{
5406 struct kvm_segment var;
5407 unsigned int rpl;
5408
5409 vmx_get_segment(vcpu, &var, seg);
Nadav Amitb32a9912015-03-29 16:33:04 +03005410 rpl = var.selector & SEGMENT_RPL_MASK;
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005411
Avi Kivity1872a3f2009-01-04 23:26:52 +02005412 if (var.unusable)
5413 return true;
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005414 if (!var.s)
5415 return false;
5416 if (!var.present)
5417 return false;
Andy Lutomirski4d283ec2015-08-13 13:18:48 -07005418 if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005419 if (var.dpl < rpl) /* DPL < RPL */
5420 return false;
5421 }
5422
5423 /* TODO: Add other members to kvm_segment_field to allow checking for other access
5424 * rights flags
5425 */
5426 return true;
5427}
5428
5429static bool tr_valid(struct kvm_vcpu *vcpu)
5430{
5431 struct kvm_segment tr;
5432
5433 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
5434
Avi Kivity1872a3f2009-01-04 23:26:52 +02005435 if (tr.unusable)
5436 return false;
Nadav Amitb32a9912015-03-29 16:33:04 +03005437 if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005438 return false;
Avi Kivity1872a3f2009-01-04 23:26:52 +02005439 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005440 return false;
5441 if (!tr.present)
5442 return false;
5443
5444 return true;
5445}
5446
5447static bool ldtr_valid(struct kvm_vcpu *vcpu)
5448{
5449 struct kvm_segment ldtr;
5450
5451 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
5452
Avi Kivity1872a3f2009-01-04 23:26:52 +02005453 if (ldtr.unusable)
5454 return true;
Nadav Amitb32a9912015-03-29 16:33:04 +03005455 if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005456 return false;
5457 if (ldtr.type != 2)
5458 return false;
5459 if (!ldtr.present)
5460 return false;
5461
5462 return true;
5463}
5464
5465static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
5466{
5467 struct kvm_segment cs, ss;
5468
5469 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5470 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
5471
Nadav Amitb32a9912015-03-29 16:33:04 +03005472 return ((cs.selector & SEGMENT_RPL_MASK) ==
5473 (ss.selector & SEGMENT_RPL_MASK));
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005474}
5475
5476/*
5477 * Check if guest state is valid. Returns true if valid, false if
5478 * not.
5479 * We assume that registers are always usable.
5480 */
5481static bool guest_state_valid(struct kvm_vcpu *vcpu)
5482{
Gleb Natapovc5e97c82013-01-21 15:36:43 +02005483 if (enable_unrestricted_guest)
5484 return true;
5485
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005486 /* real mode guest state checks */
Gleb Natapovf13882d2013-04-14 16:07:37 +03005487 if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
Mohammed Gamal648dfaa2008-08-17 16:38:32 +03005488 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
5489 return false;
5490 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
5491 return false;
5492 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
5493 return false;
5494 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
5495 return false;
5496 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
5497 return false;
5498 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
5499 return false;
5500 } else {
5501 /* protected mode guest state checks */
5502 if (!cs_ss_rpl_check(vcpu))
5503 return false;
5504 if (!code_segment_valid(vcpu))
5505 return false;
5506 if (!stack_segment_valid(vcpu))
5507 return false;
5508 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
5509 return false;
5510 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
5511 return false;
5512 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
5513 return false;
5514 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
5515 return false;
5516 if (!tr_valid(vcpu))
5517 return false;
5518 if (!ldtr_valid(vcpu))
5519 return false;
5520 }
5521 /* TODO:
5522 * - Add checks on RIP
5523 * - Add checks on RFLAGS
5524 */
5525
5526 return true;
5527}
5528
Jim Mattson5fa99cb2017-07-06 16:33:07 -07005529static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
5530{
5531 return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
5532}
5533
Mike Dayd77c26f2007-10-08 09:02:08 -04005534static int init_rmode_tss(struct kvm *kvm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005535{
Xiao Guangrong40dcaa92011-03-09 15:41:04 +08005536 gfn_t fn;
Izik Eidus195aefd2007-10-01 22:14:18 +02005537 u16 data = 0;
Paolo Bonzini1f755a82014-09-16 13:37:40 +02005538 int idx, r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005539
Xiao Guangrong40dcaa92011-03-09 15:41:04 +08005540 idx = srcu_read_lock(&kvm->srcu);
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005541 fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
Izik Eidus195aefd2007-10-01 22:14:18 +02005542 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
5543 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05005544 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02005545 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
Sheng Yang464d17c2008-08-13 14:10:33 +08005546 r = kvm_write_guest_page(kvm, fn++, &data,
5547 TSS_IOPB_BASE_OFFSET, sizeof(u16));
Izik Eidus195aefd2007-10-01 22:14:18 +02005548 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05005549 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02005550 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
5551 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05005552 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02005553 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
5554 if (r < 0)
Marcelo Tosatti10589a42007-12-20 19:18:22 -05005555 goto out;
Izik Eidus195aefd2007-10-01 22:14:18 +02005556 data = ~0;
Marcelo Tosatti10589a42007-12-20 19:18:22 -05005557 r = kvm_write_guest_page(kvm, fn, &data,
5558 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
5559 sizeof(u8));
Marcelo Tosatti10589a42007-12-20 19:18:22 -05005560out:
Xiao Guangrong40dcaa92011-03-09 15:41:04 +08005561 srcu_read_unlock(&kvm->srcu, idx);
Paolo Bonzini1f755a82014-09-16 13:37:40 +02005562 return r;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005563}
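
/*
 * Sketch of what init_rmode_tss() builds: a TSS whose I/O-bitmap base
 * field (written at TSS_IOPB_BASE_OFFSET) points past the interrupt
 * redirection bitmap, followed by the I/O permission bitmap itself.
 * The final 0xff byte written above is the terminating byte the
 * architecture requires at the end of the I/O bitmap.
 */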
5564
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005565static int init_rmode_identity_map(struct kvm *kvm)
5566{
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005567 struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
Tang Chenf51770e2014-09-16 18:41:59 +08005568 int i, idx, r = 0;
Dan Williamsba049e92016-01-15 16:56:11 -08005569 kvm_pfn_t identity_map_pfn;
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005570 u32 tmp;
5571
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005572 /* Protect kvm_vmx->ept_identity_pagetable_done. */
Tang Chena255d472014-09-16 18:41:58 +08005573 mutex_lock(&kvm->slots_lock);
5574
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005575 if (likely(kvm_vmx->ept_identity_pagetable_done))
Tang Chena255d472014-09-16 18:41:58 +08005576 goto out2;
Tang Chena255d472014-09-16 18:41:58 +08005577
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005578 if (!kvm_vmx->ept_identity_map_addr)
5579 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
5580 identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
Tang Chena255d472014-09-16 18:41:58 +08005581
David Hildenbrandd8a6e362017-08-24 20:51:34 +02005582 r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005583 kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
Tang Chenf51770e2014-09-16 18:41:59 +08005584 if (r < 0)
Tang Chena255d472014-09-16 18:41:58 +08005585 goto out2;
5586
Xiao Guangrong40dcaa92011-03-09 15:41:04 +08005587 idx = srcu_read_lock(&kvm->srcu);
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005588 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
5589 if (r < 0)
5590 goto out;
5591 /* Set up identity-mapping pagetable for EPT in real mode */
5592 for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
5593 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
5594 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
5595 r = kvm_write_guest_page(kvm, identity_map_pfn,
5596 &tmp, i * sizeof(tmp), sizeof(tmp));
5597 if (r < 0)
5598 goto out;
5599 }
Sean Christopherson40bbb9d2018-03-20 12:17:20 -07005600 kvm_vmx->ept_identity_pagetable_done = true;
Tang Chenf51770e2014-09-16 18:41:59 +08005601
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005602out:
Xiao Guangrong40dcaa92011-03-09 15:41:04 +08005603 srcu_read_unlock(&kvm->srcu, idx);
Tang Chena255d472014-09-16 18:41:58 +08005604
5605out2:
5606 mutex_unlock(&kvm->slots_lock);
Tang Chenf51770e2014-09-16 18:41:59 +08005607 return r;
Sheng Yangb7ebfb02008-04-25 21:44:52 +08005608}
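
/*
 * Worked example for the loop above: with 4MB pages (_PAGE_PSE), PDE i
 * identity-maps guest physical range [i << 22, (i + 1) << 22), so the
 * 1024 entries cover the first 4GB.  E.g. entry 1 is
 *
 *	(1 << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
 *	_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE = 0x4000e7
 */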
5609
Avi Kivity6aa8b732006-12-10 02:21:36 -08005610static void seg_setup(int seg)
5611{
Mathias Krause772e0312012-08-30 01:30:19 +02005612 const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005613 unsigned int ar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005614
5615 vmcs_write16(sf->selector, 0);
5616 vmcs_writel(sf->base, 0);
5617 vmcs_write32(sf->limit, 0xffff);
Gleb Natapovd54d07b2012-12-20 16:57:46 +02005618 ar = 0x93;
5619 if (seg == VCPU_SREG_CS)
5620 ar |= 0x08; /* code segment */
Nitin A Kamble3a624e22009-06-08 11:34:16 -07005621
5622 vmcs_write32(sf->ar_bytes, ar);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005623}
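
/*
 * Decoding the access rights used above: ar = 0x93 is present (bit 7),
 * s = 1 (bit 4), type = 3 (read/write data, accessed); OR-ing in 0x08
 * for CS gives 0x9b, type 11 (execute/read code, accessed), which is
 * the state real-mode segments are expected to have after reset.
 */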
5624
Sheng Yangf78e0e22007-10-29 09:40:42 +08005625static int alloc_apic_access_page(struct kvm *kvm)
5626{
Xiao Guangrong44841412012-09-07 14:14:20 +08005627 struct page *page;
Sheng Yangf78e0e22007-10-29 09:40:42 +08005628 int r = 0;
5629
Marcelo Tosatti79fac952009-12-23 14:35:26 -02005630 mutex_lock(&kvm->slots_lock);
Tang Chenc24ae0d2014-09-24 15:57:58 +08005631 if (kvm->arch.apic_access_page_done)
Sheng Yangf78e0e22007-10-29 09:40:42 +08005632 goto out;
Paolo Bonzini1d8007b2015-10-12 13:38:32 +02005633 r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
5634 APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
Sheng Yangf78e0e22007-10-29 09:40:42 +08005635 if (r)
5636 goto out;
Izik Eidus72dc67a2008-02-10 18:04:15 +02005637
Tang Chen73a6d942014-09-11 13:38:00 +08005638 page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
Xiao Guangrong44841412012-09-07 14:14:20 +08005639 if (is_error_page(page)) {
5640 r = -EFAULT;
5641 goto out;
5642 }
5643
Tang Chenc24ae0d2014-09-24 15:57:58 +08005644 /*
5645 * Do not pin the page in memory, so that memory hot-unplug
5646 * is able to migrate it.
5647 */
5648 put_page(page);
5649 kvm->arch.apic_access_page_done = true;
Sheng Yangf78e0e22007-10-29 09:40:42 +08005650out:
Marcelo Tosatti79fac952009-12-23 14:35:26 -02005651 mutex_unlock(&kvm->slots_lock);
Sheng Yangf78e0e22007-10-29 09:40:42 +08005652 return r;
5653}
5654
Wanpeng Li991e7a02015-09-16 17:30:05 +08005655static int allocate_vpid(void)
Sheng Yang2384d2b2008-01-17 15:14:33 +08005656{
5657 int vpid;
5658
Avi Kivity919818a2009-03-23 18:01:29 +02005659 if (!enable_vpid)
Wanpeng Li991e7a02015-09-16 17:30:05 +08005660 return 0;
Sheng Yang2384d2b2008-01-17 15:14:33 +08005661 spin_lock(&vmx_vpid_lock);
5662 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
Wanpeng Li991e7a02015-09-16 17:30:05 +08005663 if (vpid < VMX_NR_VPIDS)
Sheng Yang2384d2b2008-01-17 15:14:33 +08005664 __set_bit(vpid, vmx_vpid_bitmap);
Wanpeng Li991e7a02015-09-16 17:30:05 +08005665 else
5666 vpid = 0;
Sheng Yang2384d2b2008-01-17 15:14:33 +08005667 spin_unlock(&vmx_vpid_lock);
Wanpeng Li991e7a02015-09-16 17:30:05 +08005668 return vpid;
Sheng Yang2384d2b2008-01-17 15:14:33 +08005669}
5670
Wanpeng Li991e7a02015-09-16 17:30:05 +08005671static void free_vpid(int vpid)
Lai Jiangshancdbecfc2010-04-17 16:41:47 +08005672{
Wanpeng Li991e7a02015-09-16 17:30:05 +08005673 if (!enable_vpid || vpid == 0)
Lai Jiangshancdbecfc2010-04-17 16:41:47 +08005674 return;
5675 spin_lock(&vmx_vpid_lock);
Wanpeng Li991e7a02015-09-16 17:30:05 +08005676 __clear_bit(vpid, vmx_vpid_bitmap);
Lai Jiangshancdbecfc2010-04-17 16:41:47 +08005677 spin_unlock(&vmx_vpid_lock);
5678}
5679
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005680static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
5681 u32 msr, int type)
Sheng Yang25c5f222008-03-28 13:18:56 +08005682{
Avi Kivity3e7c73e2009-02-24 21:46:19 +02005683 int f = sizeof(unsigned long);
Sheng Yang25c5f222008-03-28 13:18:56 +08005684
5685 if (!cpu_has_vmx_msr_bitmap())
5686 return;
5687
Vitaly Kuznetsovceef7d12018-04-16 12:50:33 +02005688 if (static_branch_unlikely(&enable_evmcs))
5689 evmcs_touch_msr_bitmap();
5690
Sheng Yang25c5f222008-03-28 13:18:56 +08005691 /*
5692 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5693 * have the write-low and read-high bitmap offsets the wrong way round.
5694 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5695 */
Sheng Yang25c5f222008-03-28 13:18:56 +08005696 if (msr <= 0x1fff) {
Yang Zhang8d146952013-01-25 10:18:50 +08005697 if (type & MSR_TYPE_R)
5698 /* read-low */
5699 __clear_bit(msr, msr_bitmap + 0x000 / f);
5700
5701 if (type & MSR_TYPE_W)
5702 /* write-low */
5703 __clear_bit(msr, msr_bitmap + 0x800 / f);
5704
Sheng Yang25c5f222008-03-28 13:18:56 +08005705 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5706 msr &= 0x1fff;
Yang Zhang8d146952013-01-25 10:18:50 +08005707 if (type & MSR_TYPE_R)
5708 /* read-high */
5709 __clear_bit(msr, msr_bitmap + 0x400 / f);
5710
5711 if (type & MSR_TYPE_W)
5712 /* write-high */
5713 __clear_bit(msr, msr_bitmap + 0xc00 / f);
5714
5715 }
5716}
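
/*
 * Layout of the 4KB MSR bitmap manipulated above, indexed by bit
 * position (msr & 0x1fff) within each 1KB quarter:
 *
 *	0x000: reads  of MSRs 0x00000000 - 0x00001fff ("low")
 *	0x400: reads  of MSRs 0xc0000000 - 0xc0001fff ("high")
 *	0x800: writes of MSRs 0x00000000 - 0x00001fff
 *	0xc00: writes of MSRs 0xc0000000 - 0xc0001fff
 *
 * A clear bit lets the access through; a set bit forces a vmexit.
 */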
5717
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005718static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
5719 u32 msr, int type)
5720{
5721 int f = sizeof(unsigned long);
5722
5723 if (!cpu_has_vmx_msr_bitmap())
5724 return;
5725
Vitaly Kuznetsovceef7d12018-04-16 12:50:33 +02005726 if (static_branch_unlikely(&enable_evmcs))
5727 evmcs_touch_msr_bitmap();
5728
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005729 /*
5730 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5731 * have the write-low and read-high bitmap offsets the wrong way round.
5732 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5733 */
5734 if (msr <= 0x1fff) {
5735 if (type & MSR_TYPE_R)
5736 /* read-low */
5737 __set_bit(msr, msr_bitmap + 0x000 / f);
5738
5739 if (type & MSR_TYPE_W)
5740 /* write-low */
5741 __set_bit(msr, msr_bitmap + 0x800 / f);
5742
5743 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5744 msr &= 0x1fff;
5745 if (type & MSR_TYPE_R)
5746 /* read-high */
5747 __set_bit(msr, msr_bitmap + 0x400 / f);
5748
5749 if (type & MSR_TYPE_W)
5750 /* write-high */
5751 __set_bit(msr, msr_bitmap + 0xc00 / f);
5752
5753 }
5754}
5755
5756static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
5757 u32 msr, int type, bool value)
5758{
5759 if (value)
5760 vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
5761 else
5762 vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
5763}
5764
Wincy Vanf2b93282015-02-03 23:56:03 +08005765/*
5766 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
5767 * The corresponding bit will be cleared unless both L0 and L1 allow it.
5768 */
5769static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
5770 unsigned long *msr_bitmap_nested,
5771 u32 msr, int type)
5772{
5773 int f = sizeof(unsigned long);
5774
Wincy Vanf2b93282015-02-03 23:56:03 +08005775 /*
5776 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
5777 * have the write-low and read-high bitmap offsets the wrong way round.
5778 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
5779 */
5780 if (msr <= 0x1fff) {
5781 if (type & MSR_TYPE_R &&
5782 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
5783 /* read-low */
5784 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
5785
5786 if (type & MSR_TYPE_W &&
5787 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
5788 /* write-low */
5789 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
5790
5791 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
5792 msr &= 0x1fff;
5793 if (type & MSR_TYPE_R &&
5794 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
5795 /* read-high */
5796 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
5797
5798 if (type & MSR_TYPE_W &&
5799 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
5800 /* write-high */
5801 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
5802
5803 }
5804}
5805
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005806static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
Avi Kivity58972972009-02-24 22:26:47 +02005807{
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005808 u8 mode = 0;
5809
5810 if (cpu_has_secondary_exec_ctrls() &&
5811 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
5812 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
5813 mode |= MSR_BITMAP_MODE_X2APIC;
5814 if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
5815 mode |= MSR_BITMAP_MODE_X2APIC_APICV;
5816 }
5817
5818 if (is_long_mode(vcpu))
5819 mode |= MSR_BITMAP_MODE_LM;
5820
5821 return mode;
Yang Zhang8d146952013-01-25 10:18:50 +08005822}
5823
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005824#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
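
/*
 * Example: APIC_TASKPRI is MMIO offset 0x80, so X2APIC_MSR(APIC_TASKPRI)
 * evaluates to 0x800 + (0x80 >> 4) = 0x808, the x2APIC TPR MSR.
 */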
5825
5826static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
5827 u8 mode)
Yang Zhang8d146952013-01-25 10:18:50 +08005828{
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005829 int msr;
5830
5831 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
5832 unsigned word = msr / BITS_PER_LONG;
5833 msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
5834 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
Wanpeng Lif6e90f92016-09-22 07:43:25 +08005835 }
Paolo Bonzini904e14f2018-01-16 16:51:18 +01005836
5837 if (mode & MSR_BITMAP_MODE_X2APIC) {
5838 /*
5839 * TPR reads and writes can be virtualized even if virtual interrupt
5840 * delivery is not in use.
5841 */
5842 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
5843 if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
5844 vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
5845 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
5846 vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
5847 }
5848 }
5849}
5850
5851static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
5852{
5853 struct vcpu_vmx *vmx = to_vmx(vcpu);
5854 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
5855 u8 mode = vmx_msr_bitmap_mode(vcpu);
5856 u8 changed = mode ^ vmx->msr_bitmap_mode;
5857
5858 if (!changed)
5859 return;
5860
5861 vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
5862 !(mode & MSR_BITMAP_MODE_LM));
5863
5864 if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
5865 vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
5866
5867 vmx->msr_bitmap_mode = mode;
Avi Kivity58972972009-02-24 22:26:47 +02005868}
5869
Suravee Suthikulpanitb2a05fe2017-09-12 10:42:41 -05005870static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
Paolo Bonzinid50ab6c2015-07-29 11:49:59 +02005871{
Andrey Smetanind62caab2015-11-10 15:36:33 +03005872 return enable_apicv;
Paolo Bonzinid50ab6c2015-07-29 11:49:59 +02005873}
5874
David Matlackc9f04402017-08-01 14:00:40 -07005875static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
5876{
5877 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5878 gfn_t gfn;
5879
5880 /*
5881 * Don't need to mark the APIC access page dirty; it is never
5882 * written to by the CPU during APIC virtualization.
5883 */
5884
5885 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
5886 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
5887 kvm_vcpu_mark_page_dirty(vcpu, gfn);
5888 }
5889
5890 if (nested_cpu_has_posted_intr(vmcs12)) {
5891 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
5892 kvm_vcpu_mark_page_dirty(vcpu, gfn);
5893 }
5894}
5895
5896
David Hildenbrand6342c502017-01-25 11:58:58 +01005897static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
Wincy Van705699a2015-02-03 23:58:17 +08005898{
5899 struct vcpu_vmx *vmx = to_vmx(vcpu);
5900 int max_irr;
5901 void *vapic_page;
5902 u16 status;
5903
David Matlackc9f04402017-08-01 14:00:40 -07005904 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
5905 return;
Wincy Van705699a2015-02-03 23:58:17 +08005906
David Matlackc9f04402017-08-01 14:00:40 -07005907 vmx->nested.pi_pending = false;
5908 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
5909 return;
Wincy Van705699a2015-02-03 23:58:17 +08005910
David Matlackc9f04402017-08-01 14:00:40 -07005911 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
5912 if (max_irr != 256) {
Wincy Van705699a2015-02-03 23:58:17 +08005913 vapic_page = kmap(vmx->nested.virtual_apic_page);
Liran Alone7387b02017-12-24 18:12:54 +02005914 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
5915 vapic_page, &max_irr);
Wincy Van705699a2015-02-03 23:58:17 +08005916 kunmap(vmx->nested.virtual_apic_page);
5917
5918 status = vmcs_read16(GUEST_INTR_STATUS);
5919 if ((u8)max_irr > ((u8)status & 0xff)) {
5920 status &= ~0xff;
5921 status |= (u8)max_irr;
5922 vmcs_write16(GUEST_INTR_STATUS, status);
5923 }
5924 }
David Matlackc9f04402017-08-01 14:00:40 -07005925
5926 nested_mark_vmcs12_pages_dirty(vcpu);
Wincy Van705699a2015-02-03 23:58:17 +08005927}
5928
Wincy Van06a55242017-04-28 13:13:59 +08005929static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
5930 bool nested)
Radim Krčmář21bc8dc2015-02-16 15:36:33 +01005931{
5932#ifdef CONFIG_SMP
Wincy Van06a55242017-04-28 13:13:59 +08005933 int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
5934
Radim Krčmář21bc8dc2015-02-16 15:36:33 +01005935 if (vcpu->mode == IN_GUEST_MODE) {
Feng Wu28b835d2015-09-18 22:29:54 +08005936 /*
Haozhong Zhang5753743f2017-09-18 09:56:50 +08005937		 * The vector of the interrupt to be delivered to the vcpu
5938		 * has already been set in the PIR before this function is called.
Feng Wu28b835d2015-09-18 22:29:54 +08005939		 *
Haozhong Zhang5753743f2017-09-18 09:56:50 +08005940		 * The following cases can be reached in this block, and
5941		 * we always send a notification event in all of them, as
5942		 * explained below.
5943		 *
5944		 * Case 1: the vcpu stays in non-root mode. Sending a
5945		 * notification event posts the interrupt to the vcpu.
5946		 *
5947		 * Case 2: the vcpu exits to root mode and is still
5948		 * runnable. The PIR will be synced to the vIRR before
5949		 * the next vcpu entry. Sending a notification event in
5950		 * this case has no effect, as the vcpu is no longer in
5951		 * non-root mode.
5952 *
5953 * Case 3: vcpu exits to root mode and is blocked.
5954 * vcpu_block() has already synced PIR to vIRR and
5955 * never blocks vcpu if vIRR is not cleared. Therefore,
5956 * a blocked vcpu here does not wait for any requested
5957 * interrupts in PIR, and sending a notification event
5958 * which has no effect is safe here.
Feng Wu28b835d2015-09-18 22:29:54 +08005959 */
Feng Wu28b835d2015-09-18 22:29:54 +08005960
Wincy Van06a55242017-04-28 13:13:59 +08005961 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
Radim Krčmář21bc8dc2015-02-16 15:36:33 +01005962 return true;
5963 }
5964#endif
5965 return false;
5966}
5967
Wincy Van705699a2015-02-03 23:58:17 +08005968static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
5969 int vector)
5970{
5971 struct vcpu_vmx *vmx = to_vmx(vcpu);
5972
5973 if (is_guest_mode(vcpu) &&
5974 vector == vmx->nested.posted_intr_nv) {
Wincy Van705699a2015-02-03 23:58:17 +08005975 /*
5976 * If a posted intr is not recognized by hardware,
5977 * we will accomplish it in the next vmentry.
5978 */
5979 vmx->nested.pi_pending = true;
5980 kvm_make_request(KVM_REQ_EVENT, vcpu);
Liran Alon6b697712017-11-09 20:27:20 +02005981 /* the PIR and ON have been set by L1. */
5982 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
5983 kvm_vcpu_kick(vcpu);
Wincy Van705699a2015-02-03 23:58:17 +08005984 return 0;
5985 }
5986 return -1;
5987}
Avi Kivity6aa8b732006-12-10 02:21:36 -08005988/*
Yang Zhanga20ed542013-04-11 19:25:15 +08005989 * Send an interrupt to a vcpu via the posted-interrupt mechanism.
5990 * 1. If the target vcpu is running (non-root mode), send a posted-interrupt
5991 * notification to the vcpu and hardware will sync the PIR to the vIRR atomically.
5992 * 2. If the target vcpu isn't running (root mode), kick it so that it picks up
5993 * the interrupt from the PIR on the next vmentry.
5994 */
5995static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
5996{
5997 struct vcpu_vmx *vmx = to_vmx(vcpu);
5998 int r;
5999
Wincy Van705699a2015-02-03 23:58:17 +08006000 r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
6001 if (!r)
6002 return;
6003
Yang Zhanga20ed542013-04-11 19:25:15 +08006004 if (pi_test_and_set_pir(vector, &vmx->pi_desc))
6005 return;
6006
Paolo Bonzinib95234c2016-12-19 13:57:33 +01006007 /* If a previous notification has sent the IPI, nothing to do. */
6008 if (pi_test_and_set_on(&vmx->pi_desc))
6009 return;
6010
Wincy Van06a55242017-04-28 13:13:59 +08006011 if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
Yang Zhanga20ed542013-04-11 19:25:15 +08006012 kvm_vcpu_kick(vcpu);
6013}
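
/*
 * Flow sketch for the delivery path above, for a given vector:
 *
 *	pi_test_and_set_pir()	set the vector's bit in the 256-bit PIR;
 *				if already set, a previous sender owns
 *				the rest of the work;
 *	pi_test_and_set_on()	set the outstanding-notification bit;
 *				if already set, the IPI is on its way;
 *	trigger/kick		send the notification IPI if the vcpu is
 *				in guest mode, else kick it.
 */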
6014
Avi Kivity6aa8b732006-12-10 02:21:36 -08006015/*
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006016 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
6017 * will not change in the lifetime of the guest.
6018 * Note that host-state that does change is set elsewhere. E.g., host-state
6019 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
6020 */
Yang Zhanga547c6d2013-04-11 19:25:10 +08006021static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006022{
6023 u32 low32, high32;
6024 unsigned long tmpl;
6025 struct desc_ptr dt;
Andy Lutomirskid6e41f12017-05-28 10:00:17 -07006026 unsigned long cr0, cr3, cr4;
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006027
Andy Lutomirski04ac88a2016-10-31 15:18:45 -07006028 cr0 = read_cr0();
6029 WARN_ON(cr0 & X86_CR0_TS);
6030 vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
Andy Lutomirskid6e41f12017-05-28 10:00:17 -07006031
6032 /*
6033 * Save the most likely value for this task's CR3 in the VMCS.
6034 * We can't use __get_current_cr3_fast() because we're not atomic.
6035 */
Andy Lutomirski6c690ee2017-06-12 10:26:14 -07006036 cr3 = __read_cr3();
Andy Lutomirskid6e41f12017-05-28 10:00:17 -07006037 vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
Ladi Prosek44889942017-09-22 07:53:15 +02006038 vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006039
Andy Lutomirskid974baa2014-10-08 09:02:13 -07006040 /* Save the most likely value for this task's CR4 in the VMCS. */
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07006041 cr4 = cr4_read_shadow();
Andy Lutomirskid974baa2014-10-08 09:02:13 -07006042 vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
Ladi Prosek44889942017-09-22 07:53:15 +02006043 vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
Andy Lutomirskid974baa2014-10-08 09:02:13 -07006044
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006045 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
Avi Kivityb2da15a2012-05-13 19:53:24 +03006046#ifdef CONFIG_X86_64
6047 /*
6048 * Load null selectors, so we can avoid reloading them in
6049 * __vmx_load_host_state(), in case userspace uses the null selectors
6050 * too (the expected case).
6051 */
6052 vmcs_write16(HOST_DS_SELECTOR, 0);
6053 vmcs_write16(HOST_ES_SELECTOR, 0);
6054#else
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006055 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
6056 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
Avi Kivityb2da15a2012-05-13 19:53:24 +03006057#endif
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006058 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
6059 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
6060
Juergen Gross87930012017-09-04 12:25:27 +02006061 store_idt(&dt);
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006062 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
Yang Zhanga547c6d2013-04-11 19:25:10 +08006063 vmx->host_idt_base = dt.address;
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006064
Avi Kivity83287ea422012-09-16 15:10:57 +03006065 vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
Nadav Har'Ela3a8ff82011-05-25 23:09:01 +03006066
6067 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
6068 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
6069 rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
6070 vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
6071
6072 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
6073 rdmsr(MSR_IA32_CR_PAT, low32, high32);
6074 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
6075 }
6076}
6077
Nadav Har'Elbf8179a2011-05-25 23:09:31 +03006078static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
6079{
6080 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
6081 if (enable_ept)
6082 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
Nadav Har'Elfe3ef052011-05-25 23:10:02 +03006083 if (is_guest_mode(&vmx->vcpu))
6084 vmx->vcpu.arch.cr4_guest_owned_bits &=
6085 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
Nadav Har'Elbf8179a2011-05-25 23:09:31 +03006086 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
6087}
6088
Yang Zhang01e439b2013-04-11 19:25:12 +08006089static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
6090{
6091 u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
6092
Andrey Smetanind62caab2015-11-10 15:36:33 +03006093 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
Yang Zhang01e439b2013-04-11 19:25:12 +08006094 pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
Paolo Bonzinid02fcf52017-11-06 13:31:13 +01006095
6096 if (!enable_vnmi)
6097 pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
6098
Yunhong Jiang64672c92016-06-13 14:19:59 -07006099 /* Enable the preemption timer dynamically */
6100 pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
Yang Zhang01e439b2013-04-11 19:25:12 +08006101 return pin_based_exec_ctrl;
6102}
6103
Andrey Smetanind62caab2015-11-10 15:36:33 +03006104static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
6105{
6106 struct vcpu_vmx *vmx = to_vmx(vcpu);
6107
6108 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
Roman Kagan3ce424e2016-05-18 17:48:20 +03006109 if (cpu_has_secondary_exec_ctrls()) {
6110 if (kvm_vcpu_apicv_active(vcpu))
6111 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
6112 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6113 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6114 else
6115 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
6116 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6117 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
6118 }
6119
6120 if (cpu_has_vmx_msr_bitmap())
Paolo Bonzini904e14f2018-01-16 16:51:18 +01006121 vmx_update_msr_bitmap(vcpu);
Andrey Smetanind62caab2015-11-10 15:36:33 +03006122}
6123
Nadav Har'Elbf8179a2011-05-25 23:09:31 +03006124static u32 vmx_exec_control(struct vcpu_vmx *vmx)
6125{
6126 u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
Paolo Bonzinid16c2932014-02-21 10:36:37 +01006127
6128 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
6129 exec_control &= ~CPU_BASED_MOV_DR_EXITING;
6130
Paolo Bonzini35754c92015-07-29 12:05:37 +02006131 if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
Nadav Har'Elbf8179a2011-05-25 23:09:31 +03006132 exec_control &= ~CPU_BASED_TPR_SHADOW;
6133#ifdef CONFIG_X86_64
6134 exec_control |= CPU_BASED_CR8_STORE_EXITING |
6135 CPU_BASED_CR8_LOAD_EXITING;
6136#endif
6137 }
6138 if (!enable_ept)
6139 exec_control |= CPU_BASED_CR3_STORE_EXITING |
6140 CPU_BASED_CR3_LOAD_EXITING |
6141 CPU_BASED_INVLPG_EXITING;
Wanpeng Li4d5422c2018-03-12 04:53:02 -07006142 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
6143 exec_control &= ~(CPU_BASED_MWAIT_EXITING |
6144 CPU_BASED_MONITOR_EXITING);
Wanpeng Licaa057a2018-03-12 04:53:03 -07006145 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
6146 exec_control &= ~CPU_BASED_HLT_EXITING;
Nadav Har'Elbf8179a2011-05-25 23:09:31 +03006147 return exec_control;
6148}

static bool vmx_rdrand_supported(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDRAND_EXITING;
}

static bool vmx_rdseed_supported(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDSEED_EXITING;
}

static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;

	if (!cpu_need_virtualize_apic_accesses(vcpu))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
		/*
		 * Enabling INVPCID for non-EPT guests may cause a
		 * performance regression.
		 */
		exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (kvm_pause_in_guest(vmx->vcpu.kvm))
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	if (!kvm_vcpu_apicv_active(vcpu))
		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;

	/*
	 * SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
	 * in vmx_set_cr4.
	 */
	exec_control &= ~SECONDARY_EXEC_DESC;

	/*
	 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	 * (handle_vmptrld). We cannot enable shadow_vmcs here because we
	 * don't yet have a current VMCS12.
	 */
	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

	if (!enable_pml)
		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;

	if (vmx_xsaves_supported()) {
		/* Expose XSAVES only when XSAVE is exposed */
		bool xsaves_enabled =
			guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
			guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);

		if (!xsaves_enabled)
			exec_control &= ~SECONDARY_EXEC_XSAVES;

		if (nested) {
			if (xsaves_enabled)
				vmx->nested.msrs.secondary_ctls_high |=
					SECONDARY_EXEC_XSAVES;
			else
				vmx->nested.msrs.secondary_ctls_high &=
					~SECONDARY_EXEC_XSAVES;
		}
	}

	if (vmx_rdtscp_supported()) {
		bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
		if (!rdtscp_enabled)
			exec_control &= ~SECONDARY_EXEC_RDTSCP;

		if (nested) {
			if (rdtscp_enabled)
				vmx->nested.msrs.secondary_ctls_high |=
					SECONDARY_EXEC_RDTSCP;
			else
				vmx->nested.msrs.secondary_ctls_high &=
					~SECONDARY_EXEC_RDTSCP;
		}
	}

	if (vmx_invpcid_supported()) {
		/* Expose INVPCID only when PCID is exposed */
		bool invpcid_enabled =
			guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
			guest_cpuid_has(vcpu, X86_FEATURE_PCID);

		if (!invpcid_enabled) {
			exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
			guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
		}

		if (nested) {
			if (invpcid_enabled)
				vmx->nested.msrs.secondary_ctls_high |=
					SECONDARY_EXEC_ENABLE_INVPCID;
			else
				vmx->nested.msrs.secondary_ctls_high &=
					~SECONDARY_EXEC_ENABLE_INVPCID;
		}
	}

	if (vmx_rdrand_supported()) {
		bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
		if (rdrand_enabled)
			exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;

		if (nested) {
			if (rdrand_enabled)
				vmx->nested.msrs.secondary_ctls_high |=
					SECONDARY_EXEC_RDRAND_EXITING;
			else
				vmx->nested.msrs.secondary_ctls_high &=
					~SECONDARY_EXEC_RDRAND_EXITING;
		}
	}

	if (vmx_rdseed_supported()) {
		bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
		if (rdseed_enabled)
			exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;

		if (nested) {
			if (rdseed_enabled)
				vmx->nested.msrs.secondary_ctls_high |=
					SECONDARY_EXEC_RDSEED_EXITING;
			else
				vmx->nested.msrs.secondary_ctls_high &=
					~SECONDARY_EXEC_RDSEED_EXITING;
		}
	}

	vmx->secondary_exec_control = exec_control;
}
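
/*
 * Illustrative sketch (not part of the driver): the pattern above is
 * identical for RDRAND and RDSEED — the exiting control is cleared
 * when the guest's CPUID exposes the instruction, and the nested
 * (VMX capability MSR) view is kept in sync so L1 may itself enable
 * exiting. A hypothetical helper capturing the pattern; the name and
 * parameters are made up for this example.
 */
static inline u32 update_exiting_control_sketch(u32 exec_control,
						u32 *nested_ctls_high,
						bool insn_exposed,
						u32 exiting_bit, bool nested_on)
{
	if (insn_exposed)
		exec_control &= ~exiting_bit;	/* run the insn natively */

	if (nested_on) {
		if (insn_exposed)
			*nested_ctls_high |= exiting_bit;
		else
			*nested_ctls_high &= ~exiting_bit;
	}
	return exec_control;
}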

static void ept_set_mmio_spte_mask(void)
{
	/*
	 * EPT Misconfigurations can be generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
				   VMX_EPT_MISCONFIG_WX_VALUE);
}
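
/*
 * Illustrative sketch (not part of the driver): an EPT entry that is
 * writable and executable but not readable (bits 2:0 == 110b) is
 * architecturally invalid and raises an EPT misconfiguration exit, so
 * KVM plants exactly that encoding in MMIO sptes and gets a cheap,
 * unambiguous trap on guest MMIO accesses. The predicate below is a
 * hypothetical restatement of that test; the name is made up.
 */
static inline bool spte_is_mmio_misconfig_sketch(u64 spte)
{
	/* bits 2:0 = write|execute without read */
	return (spte & VMX_EPT_RWX_MASK) == VMX_EPT_MISCONFIG_WX_VALUE;
}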

#define VMX_XSS_EXIT_BITMAP 0
/*
 * Sets up the vmcs for emulated real mode.
 */
static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
	unsigned long a;
#endif
	int i;

	if (enable_shadow_vmcs) {
		/*
		 * At vCPU creation, "VMWRITE to any supported field
		 * in the VMCS" is supported, so use the more
		 * permissive vmx_vmread_bitmap to specify both read
		 * and write permissions for the shadow VMCS.
		 */
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
	}
	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
	vmx->hv_deadline_tsc = -1;

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls()) {
		vmx_compute_secondary_exec_control(vmx);
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
			     vmx->secondary_exec_control);
	}

	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
		vmcs_write64(EOI_EXIT_BITMAP0, 0);
		vmcs_write64(EOI_EXIT_BITMAP1, 0);
		vmcs_write64(EOI_EXIT_BITMAP2, 0);
		vmcs_write64(EOI_EXIT_BITMAP3, 0);

		vmcs_write16(GUEST_INTR_STATUS, 0);

		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
	}

	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmx->ple_window = ple_window;
		vmx->ple_window_dirty = true;
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
	vmx_set_constant_host_state(vmx);
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

	for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		vmx->guest_msrs[j].index = i;
		vmx->guest_msrs[j].data = 0;
		vmx->guest_msrs[j].mask = -1ull;
		++vmx->nmsrs;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);

	vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);

	vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);

	set_cr4_guest_host_mask(vmx);

	if (vmx_xsaves_supported())
		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);

	if (enable_pml) {
		ASSERT(vmx->pml_pg);
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}
}

static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct msr_data apic_base_msr;
	u64 cr0;

	vmx->rmode.vm86_active = 0;
	vmx->spec_ctrl = 0;

	vcpu->arch.microcode_version = 0x100000000ULL;
	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
	kvm_set_cr8(vcpu, 0);

	if (!init_event) {
		apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
				     MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
		apic_base_msr.host_initiated = true;
		kvm_set_apic_base(vcpu, &apic_base_msr);
	}

	vmx_segment_cache_clear(vmx);

	seg_setup(VCPU_SREG_CS);
	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	if (!init_event) {
		vmcs_write32(GUEST_SYSENTER_CS, 0);
		vmcs_writel(GUEST_SYSENTER_ESP, 0);
		vmcs_writel(GUEST_SYSENTER_EIP, 0);
		vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
	}

	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
	kvm_rip_write(vcpu, 0xfff0);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	if (kvm_mpx_supported())
		vmcs_write64(GUEST_BNDCFGS, 0);

	setup_msrs(vmx);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

	if (cpu_has_vmx_tpr_shadow() && !init_event) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (cpu_need_tpr_shadow(vcpu))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				     __pa(vcpu->arch.apic->regs));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	if (vmx->vpid != 0)
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

	cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
	vmx->vcpu.arch.cr0 = cr0;
	vmx_set_cr0(vcpu, cr0); /* enter rmode */
	vmx_set_cr4(vcpu, 0);
	vmx_set_efer(vcpu, 0);

	update_exception_bitmap(vcpu);

	vpid_sync_context(vmx->vpid);
	if (init_event)
		vmx_clear_hlt(vcpu);
}

/*
 * In nested virtualization, check if L1 asked to exit on external
 * interrupts. For most existing hypervisors, this will always return true.
 */
static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
		      CPU_BASED_VIRTUAL_INTR_PENDING);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	if (!enable_vnmi ||
	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
		enable_irq_window(vcpu);
		return;
	}

	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
		      CPU_BASED_VIRTUAL_NMI_PENDING);
}

static void vmx_inject_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	uint32_t intr;
	int irq = vcpu->arch.interrupt.nr;

	trace_kvm_inj_virq(irq);

	++vcpu->stat.irq_injections;
	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (vcpu->arch.interrupt.soft)
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	intr = irq | INTR_INFO_VALID_MASK;
	if (vcpu->arch.interrupt.soft) {
		intr |= INTR_TYPE_SOFT_INTR;
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
	} else
		intr |= INTR_TYPE_EXT_INTR;
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);

	vmx_clear_hlt(vcpu);
}
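
/*
 * Illustrative sketch (not part of the driver): layout of the
 * VM-entry interruption-information field built above — vector in
 * bits 7:0, event type in bits 10:8, and the valid flag at bit 31.
 * A hypothetical builder for an interrupt injection (a real soft
 * injection additionally writes VM_ENTRY_INSTRUCTION_LEN, as above);
 * the name is made up for this example.
 */
static inline u32 build_entry_intr_info_sketch(u8 vector, bool soft)
{
	u32 intr = vector | INTR_INFO_VALID_MASK;	/* bit 31: valid */

	intr |= soft ? INTR_TYPE_SOFT_INTR : INTR_TYPE_EXT_INTR;
	return intr;
}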

static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!enable_vnmi) {
		/*
		 * Tracking the NMI-blocked state in software is built upon
		 * finding the next open IRQ window. This, in turn, depends on
		 * well-behaving guests: They have to keep IRQs disabled at
		 * least as long as the NMI handler runs. Otherwise we may
		 * cause NMI nesting, maybe breaking the guest. But as this is
		 * highly unlikely, we can live with the residual risk.
		 */
		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
		vmx->loaded_vmcs->vnmi_blocked_time = 0;
	}

	++vcpu->stat.nmi_injections;
	vmx->loaded_vmcs->nmi_known_unmasked = false;

	if (vmx->rmode.vm86_active) {
		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);

	vmx_clear_hlt(vcpu);
}

static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool masked;

	if (!enable_vnmi)
		return vmx->loaded_vmcs->soft_vnmi_blocked;
	if (vmx->loaded_vmcs->nmi_known_unmasked)
		return false;
	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
	return masked;
}

static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!enable_vnmi) {
		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
			vmx->loaded_vmcs->vnmi_blocked_time = 0;
		}
	} else {
		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
		if (masked)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
	}
}

static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return 0;

	if (!enable_vnmi &&
	    to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
		return 0;

	return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
		   | GUEST_INTR_STATE_NMI));
}

static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return (!to_vmx(vcpu)->nested.nested_run_pending &&
		vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;

	if (enable_unrestricted_guest)
		return 0;

	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
				    PAGE_SIZE * 3);
	if (ret)
		return ret;
	to_kvm_vmx(kvm)->tss_addr = addr;
	return init_rmode_tss(kvm);
}

static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
{
	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
	return 0;
}

static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
	switch (vec) {
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		/* fall through */
	case DB_VECTOR:
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return false;
		/* fall through */
	case DE_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		return true;
	}
	return false;
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * An instruction with the address-size override prefix (opcode
	 * 0x67) causes a #SS fault with error code 0 in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
			if (vcpu->arch.halt_request) {
				vcpu->arch.halt_request = 0;
				return kvm_vcpu_halt(vcpu);
			}
			return 1;
		}
		return 0;
	}

	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	kvm_queue_exception(vcpu, vec);
	return 1;
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs, 0);
#endif
}

static int handle_machine_check(struct kvm_vcpu *vcpu)
{
	/* already handled by vcpu_run */
	return 1;
}

static int handle_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 intr_info, ex_no, error_code;
	unsigned long cr2, rip, dr6;
	u32 vect_info;
	enum emulation_result er;

	vect_info = vmx->idt_vectoring_info;
	intr_info = vmx->exit_intr_info;

	if (is_machine_check(intr_info))
		return handle_machine_check(vcpu);

	if (is_nmi(intr_info))
		return 1;  /* already handled by vmx_vcpu_run() */

	if (is_invalid_opcode(intr_info))
		return handle_ud(vcpu);

	error_code = 0;
	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);

	if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
		WARN_ON_ONCE(!enable_vmware_backdoor);
		er = emulate_instruction(vcpu,
			EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
		if (er == EMULATE_USER_EXIT)
			return 0;
		else if (er != EMULATE_DONE)
			kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
		return 1;
	}

	/*
	 * A #PF with PFEC.RSVD = 1 indicates that the guest is accessing
	 * MMIO; in that case it is better to report an internal error.
	 * See the comments in vmx_handle_exit.
	 */
	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
	    !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
		vcpu->run->internal.ndata = 3;
		vcpu->run->internal.data[0] = vect_info;
		vcpu->run->internal.data[1] = intr_info;
		vcpu->run->internal.data[2] = error_code;
		return 0;
	}

	if (is_page_fault(intr_info)) {
		cr2 = vmcs_readl(EXIT_QUALIFICATION);
		/* EPT won't cause page fault directly */
		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
	}

	ex_no = intr_info & INTR_INFO_VECTOR_MASK;

	if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
		return handle_rmode_exception(vcpu, ex_no, error_code);

	switch (ex_no) {
	case AC_VECTOR:
		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
		return 1;
	case DB_VECTOR:
		dr6 = vmcs_readl(EXIT_QUALIFICATION);
		if (!(vcpu->guest_debug &
		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= dr6 | DR6_RTM;
			if (is_icebp(intr_info))
				skip_emulated_instruction(vcpu);

			kvm_queue_exception(vcpu, DB_VECTOR);
			return 1;
		}
		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
		/* fall through */
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject #BP from
		 * user space while in guest debugging mode. Reading it for
		 * #DB as well causes no harm, it is not used in that case.
		 */
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		rip = kvm_rip_read(vcpu);
		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
		kvm_run->debug.arch.exception = ex_no;
		break;
	default:
		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
		kvm_run->ex.exception = ex_no;
		kvm_run->ex.error_code = error_code;
		break;
	}
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
	vcpu->mmio_needed = 0;
	return 0;
}

static int handle_io(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int size, in, string;
	unsigned port;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	string = (exit_qualification & 16) != 0;

	++vcpu->stat.io_exits;

	if (string)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;
	in = (exit_qualification & 8) != 0;

	return kvm_fast_pio(vcpu, size, port, in);
}
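
/*
 * Illustrative sketch (not part of the driver): the I/O-instruction
 * exit qualification decoded above packs the access size minus one in
 * bits 2:0, the direction (1 = IN) in bit 3, the string-op flag in
 * bit 4, and the port number in bits 31:16. A hypothetical decoder;
 * the struct and function names are made up for this example.
 */
struct pio_exit_sketch {
	int size;	/* 1, 2 or 4 bytes */
	bool in;	/* true for IN, false for OUT */
	bool string;	/* INS/OUTS */
	u16 port;
};

static inline struct pio_exit_sketch decode_pio_exit_sketch(unsigned long eq)
{
	return (struct pio_exit_sketch) {
		.size	= (eq & 7) + 1,
		.in	= (eq & 8) != 0,
		.string	= (eq & 16) != 0,
		.port	= eq >> 16,
	};
}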

static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
}
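
/*
 * Illustrative note (not part of the driver): the three bytes written
 * above, 0f 01 c1, encode the VMCALL instruction; this hook lets the
 * generic hypercall code rewrite a guest's hypercall site with the
 * vendor-specific opcode. A hypothetical caller:
 *
 *	unsigned char insn[3];
 *
 *	vmx_patch_hypercall(vcpu, insn);
 *	// insn now holds { 0x0f, 0x01, 0xc1 }, i.e. VMCALL
 */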

/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/*
		 * We get here when L2 changed cr0 in a way that did not change
		 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
		 * but did change L0 shadowed bits. So we first calculate the
		 * effective cr0 value that L1 would like to write into the
		 * hardware. It consists of the L2-owned bits from the new
		 * value combined with the L1-owned bits from L1's guest_cr0.
		 */
		val = (val & ~vmcs12->cr0_guest_host_mask) |
			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);

		if (!nested_guest_cr0_valid(vcpu, val))
			return 1;

		if (kvm_set_cr0(vcpu, val))
			return 1;
		vmcs_writel(CR0_READ_SHADOW, orig_val);
		return 0;
	} else {
		if (to_vmx(vcpu)->nested.vmxon &&
		    !nested_host_cr0_valid(vcpu, val))
			return 1;

		return kvm_set_cr0(vcpu, val);
	}
}
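
/*
 * Illustrative worked example (not part of the driver) for the mask
 * arithmetic in handle_set_cr0(). Suppose L1 owns CR0.TS, i.e.
 * cr0_guest_host_mask = X86_CR0_TS, L1's guest_cr0 has TS set, and L2
 * writes a value with TS clear: TS is L1-owned, so the effective
 * value keeps L1's TS=1, while every other bit comes from L2's new
 * value. The hypothetical helper below restates the formula; the
 * name is made up for this example.
 */
static inline unsigned long effective_cr0_sketch(unsigned long l2_val,
						 unsigned long l1_guest_cr0,
						 unsigned long l1_mask)
{
	/* L2-owned bits from the new value, L1-owned bits from guest_cr0 */
	return (l2_val & ~l1_mask) | (l1_guest_cr0 & l1_mask);
}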

static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (is_guest_mode(vcpu)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
		unsigned long orig_val = val;

		/* analogously to handle_set_cr0 */
		val = (val & ~vmcs12->cr4_guest_host_mask) |
			(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
		if (kvm_set_cr4(vcpu, val))
			return 1;
		vmcs_writel(CR4_READ_SHADOW, orig_val);
		return 0;
	} else
		return kvm_set_cr4(vcpu, val);
}

static int handle_desc(struct kvm_vcpu *vcpu)
{
	WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_cr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification, val;
	int cr;
	int reg;
	int err;
	int ret;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		val = kvm_register_readl(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			err = handle_set_cr0(vcpu, val);
			return kvm_complete_insn_gp(vcpu, err);
		case 3:
			WARN_ON_ONCE(enable_unrestricted_guest);
			err = kvm_set_cr3(vcpu, val);
			return kvm_complete_insn_gp(vcpu, err);
		case 4:
			err = handle_set_cr4(vcpu, val);
			return kvm_complete_insn_gp(vcpu, err);
		case 8: {
			u8 cr8_prev = kvm_get_cr8(vcpu);
			u8 cr8 = (u8)val;
			err = kvm_set_cr8(vcpu, cr8);
			ret = kvm_complete_insn_gp(vcpu, err);
			if (lapic_in_kernel(vcpu))
				return ret;
			if (cr8_prev <= cr8)
				return ret;
			/*
			 * TODO: we might be squashing a
			 * KVM_GUESTDBG_SINGLESTEP-triggered
			 * KVM_EXIT_DEBUG here.
			 */
			vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
			return 0;
		}
		}
		break;
	case 2: /* clts */
		WARN_ONCE(1, "Guest should always own CR0.TS");
		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
		return kvm_skip_emulated_instruction(vcpu);
	case 1: /*mov from cr*/
		switch (cr) {
		case 3:
			WARN_ON_ONCE(enable_unrestricted_guest);
			val = kvm_read_cr3(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			return kvm_skip_emulated_instruction(vcpu);
		case 8:
			val = kvm_get_cr8(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			return kvm_skip_emulated_instruction(vcpu);
		}
		break;
	case 3: /* lmsw */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
		kvm_lmsw(vcpu, val);

		return kvm_skip_emulated_instruction(vcpu);
	default:
		break;
	}
	vcpu->run->exit_reason = 0;
	vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
		    (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}
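
/*
 * Illustrative sketch (not part of the driver): the control-register
 * exit qualification decoded above packs the CR number in bits 3:0,
 * the access type in bits 5:4 (0 = MOV to CR, 1 = MOV from CR,
 * 2 = CLTS, 3 = LMSW) and, for MOV accesses, the general-purpose
 * register in bits 11:8. A hypothetical decoder; the name is made up
 * for this example.
 */
static inline void decode_cr_exit_sketch(unsigned long eq,
					 int *cr, int *access_type, int *reg)
{
	*cr = eq & 15;
	*access_type = (eq >> 4) & 3;
	*reg = (eq >> 8) & 15;
}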

static int handle_dr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int dr, dr7, reg;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;

	/* First, if DR does not exist, trigger UD */
	if (!kvm_require_dr(vcpu, dr))
		return 1;

	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
	if (!kvm_require_cpl(vcpu, 0))
		return 1;
	dr7 = vmcs_readl(GUEST_DR7);
	if (dr7 & DR7_GD) {
		/*
		 * As the vm-exit takes precedence over the debug trap, we
		 * need to emulate the latter, either for the host or the
		 * guest debugging itself.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
			vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
			vcpu->run->debug.arch.dr7 = dr7;
			vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
			vcpu->run->debug.arch.exception = DB_VECTOR;
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			return 0;
		} else {
			vcpu->arch.dr6 &= ~15;
			vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
			kvm_queue_exception(vcpu, DB_VECTOR);
			return 1;
		}
	}

	if (vcpu->guest_debug == 0) {
		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
				CPU_BASED_MOV_DR_EXITING);

		/*
		 * No more DR vmexits; force a reload of the debug registers
		 * and reenter on this instruction.  The next vmexit will
		 * retrieve the full state of the debug registers.
		 */
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;
	}

	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
	if (exit_qualification & TYPE_MOV_FROM_DR) {
		unsigned long val;

		if (kvm_get_dr(vcpu, dr, &val))
			return 1;
		kvm_register_write(vcpu, reg, val);
	} else
		if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
			return 1;

	return kvm_skip_emulated_instruction(vcpu);
}
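
/*
 * Illustrative sketch (not part of the driver): the debug-register
 * exit qualification decoded above carries the DR number in its low
 * bits (DEBUG_REG_ACCESS_NUM), the direction flag TYPE_MOV_FROM_DR,
 * and the general-purpose register selector extracted by
 * DEBUG_REG_ACCESS_REG(). A hypothetical predicate for the direction;
 * the name is made up for this example.
 */
static inline bool dr_access_is_read_sketch(unsigned long eq)
{
	/* MOV DRn -> GPR when the direction bit is set */
	return (eq & TYPE_MOV_FROM_DR) != 0;
}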

static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.dr6;
}

static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
}

static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	get_debugreg(vcpu->arch.dr6, 6);
	vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
}

static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
	vmcs_writel(GUEST_DR7, val);
}

static int handle_cpuid(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_cpuid(vcpu);
}

static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	struct msr_data msr_info;

	msr_info.index = ecx;
	msr_info.host_initiated = false;
	if (vmx_get_msr(vcpu, &msr_info)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_read(ecx, msr_info.data);

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
	return kvm_skip_emulated_instruction(vcpu);
}
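
/*
 * Illustrative sketch (not part of the driver): RDMSR returns the MSR
 * value in EDX:EAX, so the emulation above splits the 64-bit result
 * into two 32-bit halves, and handle_wrmsr() below reassembles them.
 * A hypothetical restatement of the split; the name is made up.
 */
static inline void split_msr_result_sketch(u64 data, u32 *eax, u32 *edx)
{
	*eax = data & -1u;		/* low 32 bits -> EAX */
	*edx = (data >> 32) & -1u;	/* high 32 bits -> EDX */
}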

static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
	struct msr_data msr;
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	msr.data = data;
	msr.index = ecx;
	msr.host_initiated = false;
	if (kvm_set_msr(vcpu, &msr) != 0) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_write(ecx, data);
	return kvm_skip_emulated_instruction(vcpu);
}

static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
	kvm_apic_update_ppr(vcpu);
	return 1;
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
			CPU_BASED_VIRTUAL_INTR_PENDING);

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	++vcpu->stat.irq_window_exits;
	return 1;
}

static int handle_halt(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu)
{
	return kvm_emulate_hypercall(vcpu);
}

static int handle_invd(struct kvm_vcpu *vcpu)
{
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_invlpg(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	kvm_mmu_invlpg(vcpu, exit_qualification);
	return kvm_skip_emulated_instruction(vcpu);
}
7246
Avi Kivityfee84b02011-11-10 14:57:25 +02007247static int handle_rdpmc(struct kvm_vcpu *vcpu)
7248{
7249 int err;
7250
7251 err = kvm_rdpmc(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08007252 return kvm_complete_insn_gp(vcpu, err);
Avi Kivityfee84b02011-11-10 14:57:25 +02007253}
7254
Avi Kivity851ba692009-08-24 11:10:17 +03007255static int handle_wbinvd(struct kvm_vcpu *vcpu)
Eddie Donge5edaa02007-11-11 12:28:35 +02007256{
Kyle Huey6affcbe2016-11-29 12:40:40 -08007257 return kvm_emulate_wbinvd(vcpu);
Eddie Donge5edaa02007-11-11 12:28:35 +02007258}
7259
Dexuan Cui2acf9232010-06-10 11:27:12 +08007260static int handle_xsetbv(struct kvm_vcpu *vcpu)
7261{
7262 u64 new_bv = kvm_read_edx_eax(vcpu);
7263 u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
7264
7265 if (kvm_set_xcr(vcpu, index, new_bv) == 0)
Kyle Huey6affcbe2016-11-29 12:40:40 -08007266 return kvm_skip_emulated_instruction(vcpu);
Dexuan Cui2acf9232010-06-10 11:27:12 +08007267 return 1;
7268}
7269
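/*
 * KVM never enables XSAVES/XRSTORS exiting (the XSS-exiting bitmap is left
 * empty), so the two handlers below should be unreachable.
 */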
Wanpeng Lif53cd632014-12-02 19:14:58 +08007270static int handle_xsaves(struct kvm_vcpu *vcpu)
7271{
Kyle Huey6affcbe2016-11-29 12:40:40 -08007272 kvm_skip_emulated_instruction(vcpu);
Wanpeng Lif53cd632014-12-02 19:14:58 +08007273 WARN(1, "this should never happen\n");
7274 return 1;
7275}
7276
7277static int handle_xrstors(struct kvm_vcpu *vcpu)
7278{
Kyle Huey6affcbe2016-11-29 12:40:40 -08007279 kvm_skip_emulated_instruction(vcpu);
Wanpeng Lif53cd632014-12-02 19:14:58 +08007280 WARN(1, "this should never happen\n");
7281 return 1;
7282}
7283
Avi Kivity851ba692009-08-24 11:10:17 +03007284static int handle_apic_access(struct kvm_vcpu *vcpu)
Sheng Yangf78e0e22007-10-29 09:40:42 +08007285{
Kevin Tian58fbbf22011-08-30 13:56:17 +03007286 if (likely(fasteoi)) {
7287 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7288 int access_type, offset;
7289
7290 access_type = exit_qualification & APIC_ACCESS_TYPE;
7291 offset = exit_qualification & APIC_ACCESS_OFFSET;
7292 /*
7293		 * A sane guest uses MOV to write EOI, and the written value is
7294		 * ignored. Short-circuit here to avoid the cost of heavy
7295		 * instruction emulation.
7296 */
7297 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
7298 (offset == APIC_EOI)) {
7299 kvm_lapic_set_eoi(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08007300 return kvm_skip_emulated_instruction(vcpu);
Kevin Tian58fbbf22011-08-30 13:56:17 +03007301 }
7302 }
Andre Przywara51d8b662010-12-21 11:12:02 +01007303 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Sheng Yangf78e0e22007-10-29 09:40:42 +08007304}
7305
Yang Zhangc7c9c562013-01-25 10:18:51 +08007306static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
7307{
7308 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7309 int vector = exit_qualification & 0xff;
7310
7311 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
7312 kvm_apic_set_eoi_accelerated(vcpu, vector);
7313 return 1;
7314}
7315
Yang Zhang83d4c282013-01-25 10:18:49 +08007316static int handle_apic_write(struct kvm_vcpu *vcpu)
7317{
7318 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7319 u32 offset = exit_qualification & 0xfff;
7320
7321 /* APIC-write VM exit is trap-like and thus no need to adjust IP */
7322 kvm_apic_write_nodecode(vcpu, offset);
7323 return 1;
7324}
7325
Avi Kivity851ba692009-08-24 11:10:17 +03007326static int handle_task_switch(struct kvm_vcpu *vcpu)
Izik Eidus37817f22008-03-24 23:14:53 +02007327{
Jan Kiszka60637aa2008-09-26 09:30:47 +02007328 struct vcpu_vmx *vmx = to_vmx(vcpu);
Izik Eidus37817f22008-03-24 23:14:53 +02007329 unsigned long exit_qualification;
Jan Kiszkae269fb22010-04-14 15:51:09 +02007330 bool has_error_code = false;
7331 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02007332 u16 tss_selector;
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01007333 int reason, type, idt_v, idt_index;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007334
7335 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01007336 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007337 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
Izik Eidus37817f22008-03-24 23:14:53 +02007338
7339 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
7340
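	/*
	 * For task-switch exits, bits 15:0 of the exit qualification hold
	 * the selector of the task to switch to and bits 31:30 encode the
	 * source of the switch (CALL, IRET, JMP or IDT task gate).
	 */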
7341 reason = (u32)exit_qualification >> 30;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007342 if (reason == TASK_SWITCH_GATE && idt_v) {
7343 switch (type) {
7344 case INTR_TYPE_NMI_INTR:
7345 vcpu->arch.nmi_injected = false;
Avi Kivity654f06f2011-03-23 15:02:47 +02007346 vmx_set_nmi_mask(vcpu, true);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007347 break;
7348 case INTR_TYPE_EXT_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03007349 case INTR_TYPE_SOFT_INTR:
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007350 kvm_clear_interrupt_queue(vcpu);
7351 break;
7352 case INTR_TYPE_HARD_EXCEPTION:
Jan Kiszkae269fb22010-04-14 15:51:09 +02007353 if (vmx->idt_vectoring_info &
7354 VECTORING_INFO_DELIVER_CODE_MASK) {
7355 has_error_code = true;
7356 error_code =
7357 vmcs_read32(IDT_VECTORING_ERROR_CODE);
7358 }
7359 /* fall through */
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007360 case INTR_TYPE_SOFT_EXCEPTION:
7361 kvm_clear_exception_queue(vcpu);
7362 break;
7363 default:
7364 break;
7365 }
Jan Kiszka60637aa2008-09-26 09:30:47 +02007366 }
Izik Eidus37817f22008-03-24 23:14:53 +02007367 tss_selector = exit_qualification;
7368
Gleb Natapov64a7ec02009-03-30 16:03:29 +03007369 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
7370 type != INTR_TYPE_EXT_INTR &&
7371 type != INTR_TYPE_NMI_INTR))
7372 skip_emulated_instruction(vcpu);
7373
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01007374 if (kvm_task_switch(vcpu, tss_selector,
7375 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
7376 has_error_code, error_code) == EMULATE_FAIL) {
Gleb Natapovacb54512010-04-15 21:03:50 +03007377 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7378 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7379 vcpu->run->internal.ndata = 0;
Jan Kiszka42dbaa52008-12-15 13:52:10 +01007380 return 0;
Gleb Natapovacb54512010-04-15 21:03:50 +03007381 }
Jan Kiszka42dbaa52008-12-15 13:52:10 +01007382
Jan Kiszka42dbaa52008-12-15 13:52:10 +01007383 /*
7384 * TODO: What about debug traps on tss switch?
7385 * Are we supposed to inject them and update dr6?
7386 */
7387
7388 return 1;
Izik Eidus37817f22008-03-24 23:14:53 +02007389}
7390
Avi Kivity851ba692009-08-24 11:10:17 +03007391static int handle_ept_violation(struct kvm_vcpu *vcpu)
Sheng Yang14394422008-04-28 12:24:45 +08007392{
Sheng Yangf9c617f2009-03-25 10:08:52 +08007393 unsigned long exit_qualification;
Sheng Yang14394422008-04-28 12:24:45 +08007394 gpa_t gpa;
Paolo Bonzinieebed242016-11-28 14:39:58 +01007395 u64 error_code;
Sheng Yang14394422008-04-28 12:24:45 +08007396
Sheng Yangf9c617f2009-03-25 10:08:52 +08007397 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
Sheng Yang14394422008-04-28 12:24:45 +08007398
Gleb Natapov0be9c7a2013-09-15 11:07:23 +03007399 /*
7400	 * If the EPT violation happened while executing IRET from NMI, the
7401	 * "blocked by NMI" bit has to be set before the next VM entry.
7402 * There are errata that may cause this bit to not be set:
7403 * AAK134, BY25.
7404 */
Gleb Natapovbcd1c292013-09-25 10:58:22 +03007405 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
Paolo Bonzinid02fcf52017-11-06 13:31:13 +01007406 enable_vnmi &&
Gleb Natapovbcd1c292013-09-25 10:58:22 +03007407 (exit_qualification & INTR_INFO_UNBLOCK_NMI))
Gleb Natapov0be9c7a2013-09-15 11:07:23 +03007408 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
7409
Sheng Yang14394422008-04-28 12:24:45 +08007410 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
Marcelo Tosatti229456f2009-06-17 09:22:14 -03007411 trace_kvm_page_fault(gpa, exit_qualification);
Xiao Guangrong4f5982a2012-06-20 15:58:04 +08007412
Junaid Shahid27959a42016-12-06 16:46:10 -08007413 /* Is it a read fault? */
Junaid Shahidab22a472016-12-21 20:29:28 -08007414 error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
Junaid Shahid27959a42016-12-06 16:46:10 -08007415 ? PFERR_USER_MASK : 0;
7416 /* Is it a write fault? */
Junaid Shahidab22a472016-12-21 20:29:28 -08007417 error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
Junaid Shahid27959a42016-12-06 16:46:10 -08007418 ? PFERR_WRITE_MASK : 0;
7419 /* Is it a fetch fault? */
Junaid Shahidab22a472016-12-21 20:29:28 -08007420 error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
Junaid Shahid27959a42016-12-06 16:46:10 -08007421 ? PFERR_FETCH_MASK : 0;
7422 /* ept page table entry is present? */
7423 error_code |= (exit_qualification &
7424 (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
7425 EPT_VIOLATION_EXECUTABLE))
7426 ? PFERR_PRESENT_MASK : 0;
Xiao Guangrong4f5982a2012-06-20 15:58:04 +08007427
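	/* Did the fault hit the final GPA rather than a guest page walk? */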
Paolo Bonzinieebed242016-11-28 14:39:58 +01007428 error_code |= (exit_qualification & 0x100) != 0 ?
7429 PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
Yang Zhang25d92082013-08-06 12:00:32 +03007430
Xiao Guangrong4f5982a2012-06-20 15:58:04 +08007431 vcpu->arch.exit_qualification = exit_qualification;
Xiao Guangrong4f5982a2012-06-20 15:58:04 +08007432 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
Sheng Yang14394422008-04-28 12:24:45 +08007433}
7434
Avi Kivity851ba692009-08-24 11:10:17 +03007435static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
Marcelo Tosatti68f89402009-06-11 12:07:43 -03007436{
Marcelo Tosatti68f89402009-06-11 12:07:43 -03007437 gpa_t gpa;
7438
Paolo Bonzini9034e6e2017-08-17 18:36:58 +02007439 /*
7440 * A nested guest cannot optimize MMIO vmexits, because we have an
7441 * nGPA here instead of the required GPA.
7442 */
Marcelo Tosatti68f89402009-06-11 12:07:43 -03007443 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
Paolo Bonzini9034e6e2017-08-17 18:36:58 +02007444 if (!is_guest_mode(vcpu) &&
7445 !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
Jason Wang931c33b2015-09-15 14:41:58 +08007446 trace_kvm_fast_mmio(gpa);
Vitaly Kuznetsovd391f122018-01-25 16:37:07 +01007447 /*
7448		 * Doing kvm_skip_emulated_instruction() relies on undefined
7449		 * behavior: Intel's manual doesn't mandate
7450		 * VM_EXIT_INSTRUCTION_LEN to be set in the VMCS when an EPT
7451		 * misconfig occurs. While real hardware was observed to set it,
7452		 * other hypervisors (namely Hyper-V) don't, and we would end up
7453		 * advancing RIP by some random value. So disable fast mmio when
7454		 * running nested, and keep it for real hardware in the hope that
7455		 * VM_EXIT_INSTRUCTION_LEN will always be set correctly there.
7456 */
7457 if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
7458 return kvm_skip_emulated_instruction(vcpu);
7459 else
7460 return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
7461 NULL, 0) == EMULATE_DONE;
Michael S. Tsirkin68c3b4d2014-03-31 21:50:44 +03007462 }
Marcelo Tosatti68f89402009-06-11 12:07:43 -03007463
Sean Christophersonc75d0edc2018-03-29 14:48:31 -07007464 return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
Marcelo Tosatti68f89402009-06-11 12:07:43 -03007465}
7466
Avi Kivity851ba692009-08-24 11:10:17 +03007467static int handle_nmi_window(struct kvm_vcpu *vcpu)
Sheng Yangf08864b2008-05-15 18:23:25 +08007468{
Paolo Bonzinid02fcf52017-11-06 13:31:13 +01007469 WARN_ON_ONCE(!enable_vnmi);
Paolo Bonzini47c01522016-12-19 11:44:07 +01007470 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
7471 CPU_BASED_VIRTUAL_NMI_PENDING);
Sheng Yangf08864b2008-05-15 18:23:25 +08007472 ++vcpu->stat.nmi_window_exits;
Avi Kivity3842d132010-07-27 12:30:24 +03007473 kvm_make_request(KVM_REQ_EVENT, vcpu);
Sheng Yangf08864b2008-05-15 18:23:25 +08007474
7475 return 1;
7476}
7477
Mohammed Gamal80ced182009-09-01 12:48:18 +02007478static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007479{
Avi Kivity8b3079a2009-01-05 12:10:54 +02007480 struct vcpu_vmx *vmx = to_vmx(vcpu);
7481 enum emulation_result err = EMULATE_DONE;
Mohammed Gamal80ced182009-09-01 12:48:18 +02007482 int ret = 1;
Avi Kivity49e9d552010-09-19 14:34:08 +02007483 u32 cpu_exec_ctrl;
7484 bool intr_window_requested;
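	/* Arbitrary bound on the number of instructions emulated per call. */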
Avi Kivityb8405c12012-06-07 17:08:48 +03007485 unsigned count = 130;
Avi Kivity49e9d552010-09-19 14:34:08 +02007486
Sean Christopherson2bb8caf2018-03-12 10:56:13 -07007487 /*
7488 * We should never reach the point where we are emulating L2
7489 * due to invalid guest state as that means we incorrectly
7490 * allowed a nested VMEntry with an invalid vmcs12.
7491 */
7492 WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
7493
Avi Kivity49e9d552010-09-19 14:34:08 +02007494 cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
7495 intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007496
Paolo Bonzini98eb2f82014-03-27 09:51:52 +01007497 while (vmx->emulation_required && count-- != 0) {
Avi Kivitybdea48e2012-06-10 18:07:57 +03007498 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
Avi Kivity49e9d552010-09-19 14:34:08 +02007499 return handle_interrupt_window(&vmx->vcpu);
7500
Radim Krčmář72875d82017-04-26 22:32:19 +02007501 if (kvm_test_request(KVM_REQ_EVENT, vcpu))
Avi Kivityde87dcdd2012-06-12 20:21:38 +03007502 return 1;
7503
Liran Alon9b8ae632017-11-05 16:56:34 +02007504 err = emulate_instruction(vcpu, 0);
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007505
Paolo Bonziniac0a48c2013-06-25 18:24:41 +02007506 if (err == EMULATE_USER_EXIT) {
Paolo Bonzini94452b92013-08-27 15:41:42 +02007507 ++vcpu->stat.mmio_exits;
Mohammed Gamal80ced182009-09-01 12:48:18 +02007508 ret = 0;
7509 goto out;
7510 }
Guillaume Thouvenin1d5a4d92008-10-29 09:39:42 +01007511
Sean Christophersonadd5ff72018-03-23 09:34:00 -07007512 if (err != EMULATE_DONE)
7513 goto emulation_error;
7514
7515 if (vmx->emulation_required && !vmx->rmode.vm86_active &&
7516 vcpu->arch.exception.pending)
7517 goto emulation_error;
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007518
Gleb Natapov8d76c492013-05-08 18:38:44 +03007519 if (vcpu->arch.halt_request) {
7520 vcpu->arch.halt_request = 0;
Joel Schopp5cb56052015-03-02 13:43:31 -06007521 ret = kvm_vcpu_halt(vcpu);
Gleb Natapov8d76c492013-05-08 18:38:44 +03007522 goto out;
7523 }
7524
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007525 if (signal_pending(current))
Mohammed Gamal80ced182009-09-01 12:48:18 +02007526 goto out;
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007527 if (need_resched())
7528 schedule();
7529 }
7530
Mohammed Gamal80ced182009-09-01 12:48:18 +02007531out:
7532 return ret;
Mohammed Gamalea953ef2008-08-17 16:47:05 +03007533
Sean Christophersonadd5ff72018-03-23 09:34:00 -07007534emulation_error:
7535 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
7536 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
7537 vcpu->run->internal.ndata = 0;
7538 return 0;
Radim Krčmářb4a2d312014-08-21 18:08:08 +02007539}
7540
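/*
 * Adaptive tuning of the PAUSE-loop-exiting window: grown on every PAUSE
 * exit and shrunk again when the vCPU is scheduled back in. Setting
 * ple_window_dirty makes the VM-entry path write the new value to the VMCS.
 */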
7541static void grow_ple_window(struct kvm_vcpu *vcpu)
7542{
7543 struct vcpu_vmx *vmx = to_vmx(vcpu);
7544 int old = vmx->ple_window;
7545
Babu Mogerc8e88712018-03-16 16:37:24 -04007546 vmx->ple_window = __grow_ple_window(old, ple_window,
7547 ple_window_grow,
7548 ple_window_max);
Radim Krčmářb4a2d312014-08-21 18:08:08 +02007549
7550 if (vmx->ple_window != old)
7551 vmx->ple_window_dirty = true;
Radim Krčmář7b462682014-08-21 18:08:09 +02007552
7553 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
Radim Krčmářb4a2d312014-08-21 18:08:08 +02007554}
7555
7556static void shrink_ple_window(struct kvm_vcpu *vcpu)
7557{
7558 struct vcpu_vmx *vmx = to_vmx(vcpu);
7559 int old = vmx->ple_window;
7560
Babu Mogerc8e88712018-03-16 16:37:24 -04007561 vmx->ple_window = __shrink_ple_window(old, ple_window,
7562 ple_window_shrink,
7563 ple_window);
Radim Krčmářb4a2d312014-08-21 18:08:08 +02007564
7565 if (vmx->ple_window != old)
7566 vmx->ple_window_dirty = true;
Radim Krčmář7b462682014-08-21 18:08:09 +02007567
7568 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
Radim Krčmářb4a2d312014-08-21 18:08:08 +02007569}
7570
7571/*
Feng Wubf9f6ac2015-09-18 22:29:55 +08007572 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
7573 */
7574static void wakeup_handler(void)
7575{
7576 struct kvm_vcpu *vcpu;
7577 int cpu = smp_processor_id();
7578
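	/*
	 * Kick every blocked vCPU on this CPU whose posted-interrupt
	 * descriptor has the ON bit set, i.e. an interrupt was posted
	 * while the vCPU was blocked.
	 */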
7579 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7580 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
7581 blocked_vcpu_list) {
7582 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
7583
7584 if (pi_test_on(pi_desc) == 1)
7585 kvm_vcpu_kick(vcpu);
7586 }
7587 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
7588}
7589
Peng Haoe01bca22018-04-07 05:47:32 +08007590static void vmx_enable_tdp(void)
Junaid Shahidf160c7b2016-12-06 16:46:16 -08007591{
7592 kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
7593 enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
7594 enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
7595 0ull, VMX_EPT_EXECUTABLE_MASK,
7596 cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05007597 VMX_EPT_RWX_MASK, 0ull);
Junaid Shahidf160c7b2016-12-06 16:46:16 -08007598
7599 ept_set_mmio_spte_mask();
7600 kvm_enable_tdp();
7601}
7602
Tiejun Chenf2c76482014-10-28 10:14:47 +08007603static __init int hardware_setup(void)
7604{
Paolo Bonzini904e14f2018-01-16 16:51:18 +01007605 int r = -ENOMEM, i;
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007606
7607 rdmsrl_safe(MSR_EFER, &host_efer);
7608
7609 for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
7610 kvm_define_shared_msr(i, vmx_msr_index[i]);
7611
Radim Krčmář23611332016-09-29 22:41:33 +02007612 for (i = 0; i < VMX_BITMAP_NR; i++) {
7613 vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
7614 if (!vmx_bitmap[i])
7615 goto out;
7616 }
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007617
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007618 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
7619 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
7620
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007621 if (setup_vmcs_config(&vmcs_config) < 0) {
7622 r = -EIO;
Radim Krčmář23611332016-09-29 22:41:33 +02007623 goto out;
Tiejun Chenbaa03522014-12-23 16:21:11 +08007624 }
Tiejun Chenf2c76482014-10-28 10:14:47 +08007625
7626 if (boot_cpu_has(X86_FEATURE_NX))
7627 kvm_enable_efer_bits(EFER_NX);
7628
Wanpeng Li08d839c2017-03-23 05:30:08 -07007629 if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
7630 !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
Tiejun Chenf2c76482014-10-28 10:14:47 +08007631 enable_vpid = 0;
Wanpeng Li08d839c2017-03-23 05:30:08 -07007632
Tiejun Chenf2c76482014-10-28 10:14:47 +08007633 if (!cpu_has_vmx_ept() ||
David Hildenbrand42aa53b2017-08-10 23:15:29 +02007634 !cpu_has_vmx_ept_4levels() ||
David Hildenbrandf5f51582017-08-24 20:51:30 +02007635 !cpu_has_vmx_ept_mt_wb() ||
Wanpeng Li8ad81822017-10-09 15:51:53 -07007636 !cpu_has_vmx_invept_global())
Tiejun Chenf2c76482014-10-28 10:14:47 +08007637 enable_ept = 0;
Tiejun Chenf2c76482014-10-28 10:14:47 +08007638
Wanpeng Lifce6ac42017-05-11 02:58:56 -07007639 if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
Tiejun Chenf2c76482014-10-28 10:14:47 +08007640 enable_ept_ad_bits = 0;
7641
Wanpeng Li8ad81822017-10-09 15:51:53 -07007642 if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
Tiejun Chenf2c76482014-10-28 10:14:47 +08007643 enable_unrestricted_guest = 0;
7644
Paolo Bonziniad15a292015-01-30 16:18:49 +01007645 if (!cpu_has_vmx_flexpriority())
Tiejun Chenf2c76482014-10-28 10:14:47 +08007646 flexpriority_enabled = 0;
7647
Paolo Bonzinid02fcf52017-11-06 13:31:13 +01007648 if (!cpu_has_virtual_nmis())
7649 enable_vnmi = 0;
7650
Paolo Bonziniad15a292015-01-30 16:18:49 +01007651 /*
7652 * set_apic_access_page_addr() is used to reload apic access
7653 * page upon invalidation. No need to do anything if not
7654 * using the APIC_ACCESS_ADDR VMCS field.
7655 */
7656 if (!flexpriority_enabled)
Tiejun Chenf2c76482014-10-28 10:14:47 +08007657 kvm_x86_ops->set_apic_access_page_addr = NULL;
Tiejun Chenf2c76482014-10-28 10:14:47 +08007658
7659 if (!cpu_has_vmx_tpr_shadow())
7660 kvm_x86_ops->update_cr8_intercept = NULL;
7661
7662 if (enable_ept && !cpu_has_vmx_ept_2m_page())
7663 kvm_disable_largepages();
7664
Wanpeng Li0f107682017-09-28 18:06:24 -07007665 if (!cpu_has_vmx_ple()) {
Tiejun Chenf2c76482014-10-28 10:14:47 +08007666 ple_gap = 0;
Wanpeng Li0f107682017-09-28 18:06:24 -07007667 ple_window = 0;
7668 ple_window_grow = 0;
7669 ple_window_max = 0;
7670 ple_window_shrink = 0;
7671 }
Tiejun Chenf2c76482014-10-28 10:14:47 +08007672
Paolo Bonzini76dfafd52016-12-19 17:17:11 +01007673 if (!cpu_has_vmx_apicv()) {
Tiejun Chenf2c76482014-10-28 10:14:47 +08007674 enable_apicv = 0;
Paolo Bonzini76dfafd52016-12-19 17:17:11 +01007675 kvm_x86_ops->sync_pir_to_irr = NULL;
7676 }
Tiejun Chenf2c76482014-10-28 10:14:47 +08007677
Haozhong Zhang64903d62015-10-20 15:39:09 +08007678 if (cpu_has_vmx_tsc_scaling()) {
7679 kvm_has_tsc_control = true;
7680 kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
7681 kvm_tsc_scaling_ratio_frac_bits = 48;
7682 }
7683
Wanpeng Li04bb92e2015-09-16 19:31:11 +08007684 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
7685
Junaid Shahidf160c7b2016-12-06 16:46:16 -08007686 if (enable_ept)
7687 vmx_enable_tdp();
7688 else
Tiejun Chenbaa03522014-12-23 16:21:11 +08007689 kvm_disable_tdp();
7690
Kai Huang843e4332015-01-28 10:54:28 +08007691 /*
7692 * Only enable PML when hardware supports PML feature, and both EPT
7693 * and EPT A/D bit features are enabled -- PML depends on them to work.
7694 */
7695 if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
7696 enable_pml = 0;
7697
7698 if (!enable_pml) {
7699 kvm_x86_ops->slot_enable_log_dirty = NULL;
7700 kvm_x86_ops->slot_disable_log_dirty = NULL;
7701 kvm_x86_ops->flush_log_dirty = NULL;
7702 kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
7703 }
7704
Yunhong Jiang64672c92016-06-13 14:19:59 -07007705 if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
7706 u64 vmx_msr;
7707
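		/*
		 * IA32_VMX_MISC[4:0] reports X such that the preemption
		 * timer counts down at the TSC rate divided by 2^X.
		 */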
7708 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
7709 cpu_preemption_timer_multi =
7710 vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
7711 } else {
7712 kvm_x86_ops->set_hv_timer = NULL;
7713 kvm_x86_ops->cancel_hv_timer = NULL;
7714 }
7715
Paolo Bonzinic5d167b2017-12-13 11:05:19 +01007716 if (!cpu_has_vmx_shadow_vmcs())
7717 enable_shadow_vmcs = 0;
7718 if (enable_shadow_vmcs)
7719 init_vmcs_shadow_fields();
7720
Feng Wubf9f6ac2015-09-18 22:29:55 +08007721 kvm_set_posted_intr_wakeup_handler(wakeup_handler);
Paolo Bonzini13893092018-02-26 13:40:09 +01007722 nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv);
Feng Wubf9f6ac2015-09-18 22:29:55 +08007723
Ashok Rajc45dcc72016-06-22 14:59:56 +08007724 kvm_mce_cap_supported |= MCG_LMCE_P;
7725
Tiejun Chenf2c76482014-10-28 10:14:47 +08007726 return alloc_kvm_area();
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007727
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007728out:
Radim Krčmář23611332016-09-29 22:41:33 +02007729 for (i = 0; i < VMX_BITMAP_NR; i++)
7730 free_page((unsigned long)vmx_bitmap[i]);
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007731
7732 return r;
Tiejun Chenf2c76482014-10-28 10:14:47 +08007733}
7734
7735static __exit void hardware_unsetup(void)
7736{
Radim Krčmář23611332016-09-29 22:41:33 +02007737 int i;
7738
7739 for (i = 0; i < VMX_BITMAP_NR; i++)
7740 free_page((unsigned long)vmx_bitmap[i]);
Tiejun Chen34a1cd62014-10-28 10:14:48 +08007741
Tiejun Chenf2c76482014-10-28 10:14:47 +08007742 free_kvm_area();
7743}
7744
Avi Kivity6aa8b732006-12-10 02:21:36 -08007745/*
Zhai, Edwin4b8d54f2009-10-09 18:03:20 +08007746 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
7747 * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
7748 */
Marcelo Tosatti9fb41ba2009-10-12 19:37:31 -03007749static int handle_pause(struct kvm_vcpu *vcpu)
Zhai, Edwin4b8d54f2009-10-09 18:03:20 +08007750{
Wanpeng Lib31c1142018-03-12 04:53:04 -07007751 if (!kvm_pause_in_guest(vcpu->kvm))
Radim Krčmářb4a2d312014-08-21 18:08:08 +02007752 grow_ple_window(vcpu);
7753
Longpeng(Mike)de63ad42017-08-08 12:05:33 +08007754 /*
7755	 * Intel SDM vol. 3, ch. 25.1.3 says: the "PAUSE-loop exiting"
7756	 * VM-execution control is ignored if CPL > 0. OTOH, KVM never
7757	 * sets PAUSE_EXITING and only sets PLE if supported, so the
7758	 * vcpu must be at CPL 0 if it gets a PAUSE exit.
7759 */
7760 kvm_vcpu_on_spin(vcpu, true);
Kyle Huey6affcbe2016-11-29 12:40:40 -08007761 return kvm_skip_emulated_instruction(vcpu);
Zhai, Edwin4b8d54f2009-10-09 18:03:20 +08007762}
7763
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04007764static int handle_nop(struct kvm_vcpu *vcpu)
Sheng Yang59708672009-12-15 13:29:54 +08007765{
Kyle Huey6affcbe2016-11-29 12:40:40 -08007766 return kvm_skip_emulated_instruction(vcpu);
Sheng Yang59708672009-12-15 13:29:54 +08007767}
7768
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04007769static int handle_mwait(struct kvm_vcpu *vcpu)
7770{
7771 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
7772 return handle_nop(vcpu);
7773}
7774
Jim Mattson45ec3682017-08-23 16:32:04 -07007775static int handle_invalid_op(struct kvm_vcpu *vcpu)
7776{
7777 kvm_queue_exception(vcpu, UD_VECTOR);
7778 return 1;
7779}
7780
Mihai Donțu5f3d45e2015-07-05 20:08:57 +03007781static int handle_monitor_trap(struct kvm_vcpu *vcpu)
7782{
7783 return 1;
7784}
7785
Gabriel L. Somlo87c00572014-05-07 16:52:13 -04007786static int handle_monitor(struct kvm_vcpu *vcpu)
7787{
7788 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
7789 return handle_nop(vcpu);
7790}
7791
Zhai, Edwin4b8d54f2009-10-09 18:03:20 +08007792/*
Arthur Chunqi Li0658fba2013-07-04 15:03:32 +08007793 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
7794 * set the success or error code of an emulated VMX instruction, as specified
7795 * by Vol 2B, VMX Instruction Reference, "Conventions".
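 * nested_vmx_succeed() clears all six status flags, failInvalid() sets
 * CF alone, and failValid() sets ZF alone while recording the error
 * number in the current vmcs12.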
7796 */
7797static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
7798{
7799 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
7800 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
7801 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
7802}
7803
7804static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
7805{
7806 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
7807 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
7808 X86_EFLAGS_SF | X86_EFLAGS_OF))
7809 | X86_EFLAGS_CF);
7810}
7811
Abel Gordon145c28d2013-04-18 14:36:55 +03007812static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
Arthur Chunqi Li0658fba2013-07-04 15:03:32 +08007813 u32 vm_instruction_error)
7814{
7815 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
7816 /*
7817 * failValid writes the error number to the current VMCS, which
7818		 * can't be done if there isn't a current VMCS.
7819 */
7820 nested_vmx_failInvalid(vcpu);
7821 return;
7822 }
7823 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
7824 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
7825 X86_EFLAGS_SF | X86_EFLAGS_OF))
7826 | X86_EFLAGS_ZF);
7827 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
7828 /*
7829 * We don't need to force a shadow sync because
7830 * VM_INSTRUCTION_ERROR is not shadowed
7831 */
7832}
Abel Gordon145c28d2013-04-18 14:36:55 +03007833
Wincy Vanff651cb2014-12-11 08:52:58 +03007834static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
7835{
7836 /* TODO: not to reset guest simply here. */
7837 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
Paolo Bonzinibbe41b92016-08-19 17:51:20 +02007838 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
Wincy Vanff651cb2014-12-11 08:52:58 +03007839}
7840
Jan Kiszkaf4124502014-03-07 20:03:13 +01007841static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
7842{
7843 struct vcpu_vmx *vmx =
7844 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
7845
7846 vmx->nested.preemption_timer_expired = true;
7847 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
7848 kvm_vcpu_kick(&vmx->vcpu);
7849
7850 return HRTIMER_NORESTART;
7851}
7852
Nadav Har'Elff2f6fe2011-05-25 23:05:27 +03007853/*
Bandan Das19677e32014-05-06 02:19:15 -04007854 * Decode the memory-address operand of a vmx instruction, as recorded on an
7855 * exit caused by such an instruction (run by a guest hypervisor).
7856 * On success, returns 0. When the operand is invalid, returns 1 and injects
7857 * #UD, #GP(0) or #SS(0).
7858 */
7859static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
7860 unsigned long exit_qualification,
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007861 u32 vmx_instruction_info, bool wr, gva_t *ret)
Bandan Das19677e32014-05-06 02:19:15 -04007862{
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007863 gva_t off;
7864 bool exn;
7865 struct kvm_segment s;
7866
Bandan Das19677e32014-05-06 02:19:15 -04007867 /*
7868 * According to Vol. 3B, "Information for VM Exits Due to Instruction
7869 * Execution", on an exit, vmx_instruction_info holds most of the
7870 * addressing components of the operand. Only the displacement part
7871 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
7872 * For how an actual address is calculated from all these components,
7873 * refer to Vol. 1, "Operand Addressing".
7874 */
7875 int scaling = vmx_instruction_info & 3;
7876 int addr_size = (vmx_instruction_info >> 7) & 7;
7877 bool is_reg = vmx_instruction_info & (1u << 10);
7878 int seg_reg = (vmx_instruction_info >> 15) & 7;
7879 int index_reg = (vmx_instruction_info >> 18) & 0xf;
7880 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
7881 int base_reg = (vmx_instruction_info >> 23) & 0xf;
7882 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
7883
7884 if (is_reg) {
7885 kvm_queue_exception(vcpu, UD_VECTOR);
7886 return 1;
7887 }
7888
7889 /* Addr = segment_base + offset */
7890 /* offset = base + [index * scale] + displacement */
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007891 off = exit_qualification; /* holds the displacement */
Bandan Das19677e32014-05-06 02:19:15 -04007892 if (base_is_valid)
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007893 off += kvm_register_read(vcpu, base_reg);
Bandan Das19677e32014-05-06 02:19:15 -04007894 if (index_is_valid)
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007895		off += kvm_register_read(vcpu, index_reg) << scaling;
7896 vmx_get_segment(vcpu, &s, seg_reg);
7897 *ret = s.base + off;
Bandan Das19677e32014-05-06 02:19:15 -04007898
7899 if (addr_size == 1) /* 32 bit */
7900 *ret &= 0xffffffff;
7901
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007902 /* Checks for #GP/#SS exceptions. */
7903 exn = false;
Quentin Casasnovasff30ef42016-06-18 11:01:05 +02007904 if (is_long_mode(vcpu)) {
7905 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
7906 * non-canonical form. This is the only check on the memory
7907 * destination for long mode!
7908 */
Yu Zhangfd8cb432017-08-24 20:27:56 +08007909 exn = is_noncanonical_address(*ret, vcpu);
Quentin Casasnovasff30ef42016-06-18 11:01:05 +02007910 } else if (is_protmode(vcpu)) {
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007911 /* Protected mode: apply checks for segment validity in the
7912 * following order:
7913 * - segment type check (#GP(0) may be thrown)
7914 * - usability check (#GP(0)/#SS(0))
7915 * - limit check (#GP(0)/#SS(0))
7916 */
7917 if (wr)
7918 /* #GP(0) if the destination operand is located in a
7919 * read-only data segment or any code segment.
7920 */
7921 exn = ((s.type & 0xa) == 0 || (s.type & 8));
7922 else
7923 /* #GP(0) if the source operand is located in an
7924 * execute-only code segment
7925 */
7926 exn = ((s.type & 0xa) == 8);
Quentin Casasnovasff30ef42016-06-18 11:01:05 +02007927 if (exn) {
7928 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
7929 return 1;
7930 }
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007931 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
7932 */
7933 exn = (s.unusable != 0);
7934 /* Protected mode: #GP(0)/#SS(0) if the memory
7935 * operand is outside the segment limit.
7936 */
7937 exn = exn || (off + sizeof(u64) > s.limit);
7938 }
7939 if (exn) {
7940 kvm_queue_exception_e(vcpu,
7941 seg_reg == VCPU_SREG_SS ?
7942 SS_VECTOR : GP_VECTOR,
7943 0);
7944 return 1;
7945 }
7946
Bandan Das19677e32014-05-06 02:19:15 -04007947 return 0;
7948}
7949
Radim Krčmářcbf71272017-05-19 15:48:51 +02007950static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
Bandan Das3573e222014-05-06 02:19:16 -04007951{
7952 gva_t gva;
Bandan Das3573e222014-05-06 02:19:16 -04007953 struct x86_exception e;
Bandan Das3573e222014-05-06 02:19:16 -04007954
7955 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00007956 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
Bandan Das3573e222014-05-06 02:19:16 -04007957 return 1;
7958
Paolo Bonzinice14e868a2018-06-06 17:37:49 +02007959 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
Bandan Das3573e222014-05-06 02:19:16 -04007960 kvm_inject_page_fault(vcpu, &e);
7961 return 1;
7962 }
7963
Bandan Das3573e222014-05-06 02:19:16 -04007964 return 0;
7965}
7966
Jim Mattsone29acc52016-11-30 12:03:43 -08007967static int enter_vmx_operation(struct kvm_vcpu *vcpu)
7968{
7969 struct vcpu_vmx *vmx = to_vmx(vcpu);
7970 struct vmcs *shadow_vmcs;
Paolo Bonzinif21f1652018-01-11 12:16:15 +01007971 int r;
Jim Mattsone29acc52016-11-30 12:03:43 -08007972
Paolo Bonzinif21f1652018-01-11 12:16:15 +01007973 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
7974 if (r < 0)
Jim Mattsonde3a0022017-11-27 17:22:25 -06007975 goto out_vmcs02;
Jim Mattsone29acc52016-11-30 12:03:43 -08007976
7977 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
7978 if (!vmx->nested.cached_vmcs12)
7979 goto out_cached_vmcs12;
7980
7981 if (enable_shadow_vmcs) {
7982 shadow_vmcs = alloc_vmcs();
7983 if (!shadow_vmcs)
7984 goto out_shadow_vmcs;
7985 /* mark vmcs as shadow */
7986 shadow_vmcs->revision_id |= (1u << 31);
7987 /* init shadow vmcs */
7988 vmcs_clear(shadow_vmcs);
7989 vmx->vmcs01.shadow_vmcs = shadow_vmcs;
7990 }
7991
Jim Mattsone29acc52016-11-30 12:03:43 -08007992 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
7993 HRTIMER_MODE_REL_PINNED);
7994 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
7995
7996 vmx->nested.vmxon = true;
7997 return 0;
7998
7999out_shadow_vmcs:
8000 kfree(vmx->nested.cached_vmcs12);
8001
8002out_cached_vmcs12:
Jim Mattsonde3a0022017-11-27 17:22:25 -06008003 free_loaded_vmcs(&vmx->nested.vmcs02);
Jim Mattsone29acc52016-11-30 12:03:43 -08008004
Jim Mattsonde3a0022017-11-27 17:22:25 -06008005out_vmcs02:
Jim Mattsone29acc52016-11-30 12:03:43 -08008006 return -ENOMEM;
8007}
8008
Bandan Das3573e222014-05-06 02:19:16 -04008009/*
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008010 * Emulate the VMXON instruction.
8011 * Currently, we just remember that VMX is active, validate and record
8012 * the argument to VMXON (the so-called "VMXON pointer"), but do not
8013 * need to store anything else in that guest-allocated memory region.
8014 * Recording the pointer lets VMCLEAR and VMPTRLD verify that their
8015 * argument is different from the VMXON pointer (as the spec requires).
8016 */
8017static int handle_vmon(struct kvm_vcpu *vcpu)
8018{
Jim Mattsone29acc52016-11-30 12:03:43 -08008019 int ret;
Radim Krčmářcbf71272017-05-19 15:48:51 +02008020 gpa_t vmptr;
8021 struct page *page;
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008022 struct vcpu_vmx *vmx = to_vmx(vcpu);
Nadav Har'Elb3897a42013-07-08 19:12:35 +08008023 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
8024 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008025
Jim Mattson70f3aac2017-04-26 08:53:46 -07008026 /*
8027 * The Intel VMX Instruction Reference lists a bunch of bits that are
8028 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
8029 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
8030 * Otherwise, we should fail with #UD. But most faulting conditions
8031 * have already been checked by hardware, prior to the VM-exit for
8032 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
8033 * that bit set to 1 in non-root mode.
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008034 */
Jim Mattson70f3aac2017-04-26 08:53:46 -07008035 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008036 kvm_queue_exception(vcpu, UD_VECTOR);
8037 return 1;
8038 }
8039
Felix Wilhelm727ba742018-06-11 09:43:44 +02008040 /* CPL=0 must be checked manually. */
8041 if (vmx_get_cpl(vcpu)) {
8042		kvm_inject_gp(vcpu, 0);
8043 return 1;
8044 }
8045
Abel Gordon145c28d2013-04-18 14:36:55 +03008046 if (vmx->nested.vmxon) {
8047 nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008048 return kvm_skip_emulated_instruction(vcpu);
Abel Gordon145c28d2013-04-18 14:36:55 +03008049 }
Nadav Har'Elb3897a42013-07-08 19:12:35 +08008050
Haozhong Zhang3b840802016-06-22 14:59:54 +08008051 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
Nadav Har'Elb3897a42013-07-08 19:12:35 +08008052 != VMXON_NEEDED_FEATURES) {
8053 kvm_inject_gp(vcpu, 0);
8054 return 1;
8055 }
8056
Radim Krčmářcbf71272017-05-19 15:48:51 +02008057 if (nested_vmx_get_vmptr(vcpu, &vmptr))
Jim Mattson21e7fbe2016-12-22 15:49:55 -08008058 return 1;
Radim Krčmářcbf71272017-05-19 15:48:51 +02008059
8060 /*
8061 * SDM 3: 24.11.5
8062 * The first 4 bytes of VMXON region contain the supported
8063 * VMCS revision identifier
8064 *
8065	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
8066	 * which would replace the physical address width with 32.
8067 */
8068 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
8069 nested_vmx_failInvalid(vcpu);
8070 return kvm_skip_emulated_instruction(vcpu);
8071 }
8072
David Hildenbrand5e2f30b2017-08-03 18:11:04 +02008073 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
8074 if (is_error_page(page)) {
Radim Krčmářcbf71272017-05-19 15:48:51 +02008075 nested_vmx_failInvalid(vcpu);
8076 return kvm_skip_emulated_instruction(vcpu);
8077 }
8078 if (*(u32 *)kmap(page) != VMCS12_REVISION) {
8079 kunmap(page);
David Hildenbrand53a70da2017-08-03 18:11:05 +02008080 kvm_release_page_clean(page);
Radim Krčmářcbf71272017-05-19 15:48:51 +02008081 nested_vmx_failInvalid(vcpu);
8082 return kvm_skip_emulated_instruction(vcpu);
8083 }
8084 kunmap(page);
David Hildenbrand53a70da2017-08-03 18:11:05 +02008085 kvm_release_page_clean(page);
Radim Krčmářcbf71272017-05-19 15:48:51 +02008086
8087 vmx->nested.vmxon_ptr = vmptr;
Jim Mattsone29acc52016-11-30 12:03:43 -08008088 ret = enter_vmx_operation(vcpu);
8089 if (ret)
8090 return ret;
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008091
Arthur Chunqi Lia25eb112013-07-04 15:03:33 +08008092 nested_vmx_succeed(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008093 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008094}
8095
8096/*
8097 * Intel's VMX Instruction Reference specifies a common set of prerequisites
8098 * for running VMX instructions (except VMXON, whose prerequisites are
8099 * slightly different). It also specifies what exception to inject otherwise.
Jim Mattson70f3aac2017-04-26 08:53:46 -07008100 * Note that many of these exceptions have priority over VM exits, so they
8101 * don't have to be checked again here.
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008102 */
8103static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
8104{
Felix Wilhelm727ba742018-06-11 09:43:44 +02008105 if (vmx_get_cpl(vcpu)) {
8106		kvm_inject_gp(vcpu, 0);
8107 return 0;
8108 }
8109
Jim Mattson70f3aac2017-04-26 08:53:46 -07008110 if (!to_vmx(vcpu)->nested.vmxon) {
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008111 kvm_queue_exception(vcpu, UD_VECTOR);
8112 return 0;
8113 }
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008114 return 1;
8115}
8116
David Matlack8ca44e82017-08-01 14:00:39 -07008117static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
8118{
8119 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
8120 vmcs_write64(VMCS_LINK_POINTER, -1ull);
8121}
8122
Abel Gordone7953d72013-04-18 14:37:55 +03008123static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
8124{
Paolo Bonzini9a2a05b2014-07-17 11:55:46 +02008125 if (vmx->nested.current_vmptr == -1ull)
8126 return;
8127
Abel Gordon012f83c2013-04-18 14:39:25 +03008128 if (enable_shadow_vmcs) {
Paolo Bonzini9a2a05b2014-07-17 11:55:46 +02008128		/* copy to memory all shadowed fields in case they were modified */
8131 copy_shadow_to_vmcs12(vmx);
8132 vmx->nested.sync_shadow_vmcs = false;
David Matlack8ca44e82017-08-01 14:00:39 -07008133 vmx_disable_shadow_vmcs(vmx);
Abel Gordon012f83c2013-04-18 14:39:25 +03008134 }
Wincy Van705699a2015-02-03 23:58:17 +08008135 vmx->nested.posted_intr_nv = -1;
David Matlack4f2777b2016-07-13 17:16:37 -07008136
8137 /* Flush VMCS12 to guest memory */
Paolo Bonzini9f744c52017-07-27 15:54:46 +02008138 kvm_vcpu_write_guest_page(&vmx->vcpu,
8139 vmx->nested.current_vmptr >> PAGE_SHIFT,
8140 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
David Matlack4f2777b2016-07-13 17:16:37 -07008141
Paolo Bonzini9a2a05b2014-07-17 11:55:46 +02008142 vmx->nested.current_vmptr = -1ull;
Abel Gordone7953d72013-04-18 14:37:55 +03008143}
8144
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008145/*
8146 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
8147 * just stops using VMX.
8148 */
8149static void free_nested(struct vcpu_vmx *vmx)
8150{
Wanpeng Lib7455822017-11-22 14:04:00 -08008151 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008152 return;
Paolo Bonzini9a2a05b2014-07-17 11:55:46 +02008153
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008154 vmx->nested.vmxon = false;
Wanpeng Lib7455822017-11-22 14:04:00 -08008155 vmx->nested.smm.vmxon = false;
Wanpeng Li5c614b32015-10-13 09:18:36 -07008156 free_vpid(vmx->nested.vpid02);
David Matlack8ca44e82017-08-01 14:00:39 -07008157 vmx->nested.posted_intr_nv = -1;
8158 vmx->nested.current_vmptr = -1ull;
Jim Mattson355f4fb2016-10-28 08:29:39 -07008159 if (enable_shadow_vmcs) {
David Matlack8ca44e82017-08-01 14:00:39 -07008160 vmx_disable_shadow_vmcs(vmx);
Jim Mattson355f4fb2016-10-28 08:29:39 -07008161 vmcs_clear(vmx->vmcs01.shadow_vmcs);
8162 free_vmcs(vmx->vmcs01.shadow_vmcs);
8163 vmx->vmcs01.shadow_vmcs = NULL;
8164 }
David Matlack4f2777b2016-07-13 17:16:37 -07008165 kfree(vmx->nested.cached_vmcs12);
Jim Mattsonde3a0022017-11-27 17:22:25 -06008166 /* Unpin physical memory we referred to in the vmcs02 */
Nadav Har'Elfe3ef052011-05-25 23:10:02 +03008167 if (vmx->nested.apic_access_page) {
David Hildenbrand53a70da2017-08-03 18:11:05 +02008168 kvm_release_page_dirty(vmx->nested.apic_access_page);
Paolo Bonzini48d89b92014-08-26 13:27:46 +02008169 vmx->nested.apic_access_page = NULL;
Nadav Har'Elfe3ef052011-05-25 23:10:02 +03008170 }
Wanpeng Lia7c0b072014-08-21 19:46:50 +08008171 if (vmx->nested.virtual_apic_page) {
David Hildenbrand53a70da2017-08-03 18:11:05 +02008172 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
Paolo Bonzini48d89b92014-08-26 13:27:46 +02008173 vmx->nested.virtual_apic_page = NULL;
Wanpeng Lia7c0b072014-08-21 19:46:50 +08008174 }
Wincy Van705699a2015-02-03 23:58:17 +08008175 if (vmx->nested.pi_desc_page) {
8176 kunmap(vmx->nested.pi_desc_page);
David Hildenbrand53a70da2017-08-03 18:11:05 +02008177 kvm_release_page_dirty(vmx->nested.pi_desc_page);
Wincy Van705699a2015-02-03 23:58:17 +08008178 vmx->nested.pi_desc_page = NULL;
8179 vmx->nested.pi_desc = NULL;
8180 }
Nadav Har'Elff2f6fe2011-05-25 23:05:27 +03008181
Jim Mattsonde3a0022017-11-27 17:22:25 -06008182 free_loaded_vmcs(&vmx->nested.vmcs02);
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008183}
8184
8185/* Emulate the VMXOFF instruction */
8186static int handle_vmoff(struct kvm_vcpu *vcpu)
8187{
8188 if (!nested_vmx_check_permission(vcpu))
8189 return 1;
8190 free_nested(to_vmx(vcpu));
Arthur Chunqi Lia25eb112013-07-04 15:03:33 +08008191 nested_vmx_succeed(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008192 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'Elec378ae2011-05-25 23:02:54 +03008193}
8194
Nadav Har'El27d6c862011-05-25 23:06:59 +03008195/* Emulate the VMCLEAR instruction */
8196static int handle_vmclear(struct kvm_vcpu *vcpu)
8197{
8198 struct vcpu_vmx *vmx = to_vmx(vcpu);
Jim Mattson587d7e722017-03-02 12:41:48 -08008199 u32 zero = 0;
Nadav Har'El27d6c862011-05-25 23:06:59 +03008200 gpa_t vmptr;
Nadav Har'El27d6c862011-05-25 23:06:59 +03008201
8202 if (!nested_vmx_check_permission(vcpu))
8203 return 1;
8204
Radim Krčmářcbf71272017-05-19 15:48:51 +02008205 if (nested_vmx_get_vmptr(vcpu, &vmptr))
Nadav Har'El27d6c862011-05-25 23:06:59 +03008206 return 1;
8207
Radim Krčmářcbf71272017-05-19 15:48:51 +02008208 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
8209 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
8210 return kvm_skip_emulated_instruction(vcpu);
8211 }
8212
8213 if (vmptr == vmx->nested.vmxon_ptr) {
8214 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
8215 return kvm_skip_emulated_instruction(vcpu);
8216 }
8217
Paolo Bonzini9a2a05b2014-07-17 11:55:46 +02008218 if (vmptr == vmx->nested.current_vmptr)
Abel Gordone7953d72013-04-18 14:37:55 +03008219 nested_release_vmcs12(vmx);
Nadav Har'El27d6c862011-05-25 23:06:59 +03008220
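	/*
	 * Mark the vmcs12 as "clear" in guest memory; writing a zero
	 * launch_state is sufficient, as that is the word a later
	 * VMLAUNCH/VMRESUME checks.
	 */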
Jim Mattson587d7e722017-03-02 12:41:48 -08008221 kvm_vcpu_write_guest(vcpu,
8222 vmptr + offsetof(struct vmcs12, launch_state),
8223 &zero, sizeof(zero));
Nadav Har'El27d6c862011-05-25 23:06:59 +03008224
Nadav Har'El27d6c862011-05-25 23:06:59 +03008225 nested_vmx_succeed(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008226 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'El27d6c862011-05-25 23:06:59 +03008227}
8228
Nadav Har'Elcd232ad2011-05-25 23:10:33 +03008229static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
8230
8231/* Emulate the VMLAUNCH instruction */
8232static int handle_vmlaunch(struct kvm_vcpu *vcpu)
8233{
8234 return nested_vmx_run(vcpu, true);
8235}
8236
8237/* Emulate the VMRESUME instruction */
8238static int handle_vmresume(struct kvm_vcpu *vcpu)
8239{
8241 return nested_vmx_run(vcpu, false);
8242}
8243
Nadav Har'El49f705c2011-05-25 23:08:30 +03008244/*
8245 * Read a vmcs12 field. Since these can have varying lengths and we return
8246 * one type, we chose the biggest type (u64) and zero-extend the return value
8247 * to that size. Note that the caller, handle_vmread, might need to use only
8248 * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
8249 * 64-bit fields are to be returned).
8250 */
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008251static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
8252 unsigned long field, u64 *ret)
Nadav Har'El49f705c2011-05-25 23:08:30 +03008253{
8254 short offset = vmcs_field_to_offset(field);
8255 char *p;
8256
8257 if (offset < 0)
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008258 return offset;
Nadav Har'El49f705c2011-05-25 23:08:30 +03008259
8260 p = ((char *)(get_vmcs12(vcpu))) + offset;
8261
Jim Mattsond37f4262017-12-22 12:12:16 -08008262 switch (vmcs_field_width(field)) {
8263 case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
Nadav Har'El49f705c2011-05-25 23:08:30 +03008264 *ret = *((natural_width *)p);
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008265 return 0;
Jim Mattsond37f4262017-12-22 12:12:16 -08008266 case VMCS_FIELD_WIDTH_U16:
Nadav Har'El49f705c2011-05-25 23:08:30 +03008267 *ret = *((u16 *)p);
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008268 return 0;
Jim Mattsond37f4262017-12-22 12:12:16 -08008269 case VMCS_FIELD_WIDTH_U32:
Nadav Har'El49f705c2011-05-25 23:08:30 +03008270 *ret = *((u32 *)p);
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008271 return 0;
Jim Mattsond37f4262017-12-22 12:12:16 -08008272 case VMCS_FIELD_WIDTH_U64:
Nadav Har'El49f705c2011-05-25 23:08:30 +03008273 *ret = *((u64 *)p);
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008274 return 0;
Nadav Har'El49f705c2011-05-25 23:08:30 +03008275 default:
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008276 WARN_ON(1);
8277 return -ENOENT;
Nadav Har'El49f705c2011-05-25 23:08:30 +03008278 }
8279}
8280
Abel Gordon20b97fe2013-04-18 14:36:25 +03008281
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008282static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
8283 unsigned long field, u64 field_value){
Abel Gordon20b97fe2013-04-18 14:36:25 +03008284 short offset = vmcs_field_to_offset(field);
8285 char *p = ((char *) get_vmcs12(vcpu)) + offset;
8286 if (offset < 0)
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008287 return offset;
Abel Gordon20b97fe2013-04-18 14:36:25 +03008288
Jim Mattsond37f4262017-12-22 12:12:16 -08008289 switch (vmcs_field_width(field)) {
8290 case VMCS_FIELD_WIDTH_U16:
Abel Gordon20b97fe2013-04-18 14:36:25 +03008291 *(u16 *)p = field_value;
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008292 return 0;
Jim Mattsond37f4262017-12-22 12:12:16 -08008293 case VMCS_FIELD_WIDTH_U32:
Abel Gordon20b97fe2013-04-18 14:36:25 +03008294 *(u32 *)p = field_value;
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008295 return 0;
Jim Mattsond37f4262017-12-22 12:12:16 -08008296 case VMCS_FIELD_WIDTH_U64:
Abel Gordon20b97fe2013-04-18 14:36:25 +03008297 *(u64 *)p = field_value;
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008298 return 0;
Jim Mattsond37f4262017-12-22 12:12:16 -08008299 case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
Abel Gordon20b97fe2013-04-18 14:36:25 +03008300 *(natural_width *)p = field_value;
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008301 return 0;
Abel Gordon20b97fe2013-04-18 14:36:25 +03008302 default:
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008303 WARN_ON(1);
8304 return -ENOENT;
Abel Gordon20b97fe2013-04-18 14:36:25 +03008305 }
8306
8307}
8308
Jim Mattsonf4160e42018-05-29 09:11:33 -07008309/*
8310 * Copy the writable VMCS shadow fields back to the VMCS12, in case
8311 * they have been modified by the L1 guest. Note that the "read-only"
8312 * VM-exit information fields are actually writable if the vCPU is
8313 * configured to support "VMWRITE to any supported field in the VMCS."
8314 */
Abel Gordon16f5b902013-04-18 14:38:25 +03008315static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
8316{
Jim Mattsonf4160e42018-05-29 09:11:33 -07008317 const u16 *fields[] = {
8318 shadow_read_write_fields,
8319 shadow_read_only_fields
8320 };
8321 const int max_fields[] = {
8322 max_shadow_read_write_fields,
8323 max_shadow_read_only_fields
8324 };
8325 int i, q;
Abel Gordon16f5b902013-04-18 14:38:25 +03008326 unsigned long field;
8327 u64 field_value;
Jim Mattson355f4fb2016-10-28 08:29:39 -07008328 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
Abel Gordon16f5b902013-04-18 14:38:25 +03008329
Jan Kiszka282da872014-10-08 18:05:39 +02008330 preempt_disable();
8331
Abel Gordon16f5b902013-04-18 14:38:25 +03008332 vmcs_load(shadow_vmcs);
8333
Jim Mattsonf4160e42018-05-29 09:11:33 -07008334 for (q = 0; q < ARRAY_SIZE(fields); q++) {
8335 for (i = 0; i < max_fields[q]; i++) {
8336 field = fields[q][i];
8337 field_value = __vmcs_readl(field);
8338 vmcs12_write_any(&vmx->vcpu, field, field_value);
8339 }
8340 /*
8341 * Skip the VM-exit information fields if they are read-only.
8342 */
8343 if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
8344 break;
Abel Gordon16f5b902013-04-18 14:38:25 +03008345 }
8346
8347 vmcs_clear(shadow_vmcs);
8348 vmcs_load(vmx->loaded_vmcs->vmcs);
Jan Kiszka282da872014-10-08 18:05:39 +02008349
8350 preempt_enable();
Abel Gordon16f5b902013-04-18 14:38:25 +03008351}
8352
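/*
 * Mirror the authoritative vmcs12 contents into the shadow VMCS so that
 * L1's subsequent hardware-accelerated VMREADs observe up-to-date values.
 */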
Abel Gordonc3114422013-04-18 14:38:55 +03008353static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
8354{
Paolo Bonzini44900ba2017-12-13 12:58:02 +01008355 const u16 *fields[] = {
Mathias Krausec2bae892013-06-26 20:36:21 +02008356 shadow_read_write_fields,
8357 shadow_read_only_fields
Abel Gordonc3114422013-04-18 14:38:55 +03008358 };
Mathias Krausec2bae892013-06-26 20:36:21 +02008359 const int max_fields[] = {
Abel Gordonc3114422013-04-18 14:38:55 +03008360 max_shadow_read_write_fields,
8361 max_shadow_read_only_fields
8362 };
8363 int i, q;
8364 unsigned long field;
8365 u64 field_value = 0;
Jim Mattson355f4fb2016-10-28 08:29:39 -07008366 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
Abel Gordonc3114422013-04-18 14:38:55 +03008367
8368 vmcs_load(shadow_vmcs);
8369
Mathias Krausec2bae892013-06-26 20:36:21 +02008370 for (q = 0; q < ARRAY_SIZE(fields); q++) {
Abel Gordonc3114422013-04-18 14:38:55 +03008371 for (i = 0; i < max_fields[q]; i++) {
8372 field = fields[q][i];
8373 vmcs12_read_any(&vmx->vcpu, field, &field_value);
Paolo Bonzini44900ba2017-12-13 12:58:02 +01008374 __vmcs_writel(field, field_value);
Abel Gordonc3114422013-04-18 14:38:55 +03008375 }
8376 }
8377
8378 vmcs_clear(shadow_vmcs);
8379 vmcs_load(vmx->loaded_vmcs->vmcs);
8380}
8381
Nadav Har'El49f705c2011-05-25 23:08:30 +03008382/*
8383 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
8384 * used before) all generate the same failure when it is missing.
8385 */
8386static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
8387{
8388 struct vcpu_vmx *vmx = to_vmx(vcpu);
8389 if (vmx->nested.current_vmptr == -1ull) {
8390 nested_vmx_failInvalid(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008391 return 0;
8392 }
8393 return 1;
8394}
8395
8396static int handle_vmread(struct kvm_vcpu *vcpu)
8397{
8398 unsigned long field;
8399 u64 field_value;
8400 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8401 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
8402 gva_t gva = 0;
8403
Kyle Hueyeb277562016-11-29 12:40:39 -08008404 if (!nested_vmx_check_permission(vcpu))
Nadav Har'El49f705c2011-05-25 23:08:30 +03008405 return 1;
8406
Kyle Huey6affcbe2016-11-29 12:40:40 -08008407 if (!nested_vmx_check_vmcs12(vcpu))
8408 return kvm_skip_emulated_instruction(vcpu);
Kyle Hueyeb277562016-11-29 12:40:39 -08008409
Nadav Har'El49f705c2011-05-25 23:08:30 +03008410 /* Decode instruction info and find the field to read */
Nadav Amit27e6fb52014-06-18 17:19:26 +03008411 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
Nadav Har'El49f705c2011-05-25 23:08:30 +03008412 /* Read the field, zero-extended to a u64 field_value */
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008413 if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
Nadav Har'El49f705c2011-05-25 23:08:30 +03008414 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008415 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008416 }
8417 /*
8418 * Now copy part of this value to register or memory, as requested.
8419 * Note that the number of bits actually copied is 32 or 64 depending
8420 * on the guest's mode (32 or 64 bit), not on the given field's length.
8421 */
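	/*
	 * Bit 10 of the VMX instruction info field selects a register
	 * (rather than memory) destination.
	 */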
8422 if (vmx_instruction_info & (1u << 10)) {
Nadav Amit27e6fb52014-06-18 17:19:26 +03008423 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
Nadav Har'El49f705c2011-05-25 23:08:30 +03008424 field_value);
8425 } else {
8426 if (get_vmx_mem_address(vcpu, exit_qualification,
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00008427 vmx_instruction_info, true, &gva))
Nadav Har'El49f705c2011-05-25 23:08:30 +03008428 return 1;
Felix Wilhelm727ba742018-06-11 09:43:44 +02008429 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
Paolo Bonzinice14e868a2018-06-06 17:37:49 +02008430 kvm_write_guest_virt_system(vcpu, gva, &field_value,
8431 (is_long_mode(vcpu) ? 8 : 4), NULL);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008432 }
8433
8434 nested_vmx_succeed(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008435 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008436}
8437
8439static int handle_vmwrite(struct kvm_vcpu *vcpu)
8440{
8441 unsigned long field;
8442 gva_t gva;
Paolo Bonzini74a497f2017-12-20 13:55:39 +01008443 struct vcpu_vmx *vmx = to_vmx(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008444 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
8445 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
Paolo Bonzini74a497f2017-12-20 13:55:39 +01008446
Nadav Har'El49f705c2011-05-25 23:08:30 +03008447 /* The value to write might be 32 or 64 bits, depending on L1's long
8448 * mode, and eventually we need to write that into a field of several
8449 * possible lengths. The code below first zero-extends the value to 64
Adam Buchbinder6a6256f2016-02-23 15:34:30 -08008450 * bits (field_value), and then copies only the appropriate number of
Nadav Har'El49f705c2011-05-25 23:08:30 +03008451 * bits into the vmcs12 field.
8452 */
8453 u64 field_value = 0;
8454 struct x86_exception e;
8455
Kyle Hueyeb277562016-11-29 12:40:39 -08008456 if (!nested_vmx_check_permission(vcpu))
Nadav Har'El49f705c2011-05-25 23:08:30 +03008457 return 1;
8458
Kyle Huey6affcbe2016-11-29 12:40:40 -08008459 if (!nested_vmx_check_vmcs12(vcpu))
8460 return kvm_skip_emulated_instruction(vcpu);
Kyle Hueyeb277562016-11-29 12:40:39 -08008461
Nadav Har'El49f705c2011-05-25 23:08:30 +03008462 if (vmx_instruction_info & (1u << 10))
Nadav Amit27e6fb52014-06-18 17:19:26 +03008463 field_value = kvm_register_readl(vcpu,
Nadav Har'El49f705c2011-05-25 23:08:30 +03008464 (((vmx_instruction_info) >> 3) & 0xf));
8465 else {
8466 if (get_vmx_mem_address(vcpu, exit_qualification,
Eugene Korenevskyf9eb4af2015-04-17 02:22:21 +00008467 vmx_instruction_info, false, &gva))
Nadav Har'El49f705c2011-05-25 23:08:30 +03008468 return 1;
Paolo Bonzinice14e868a2018-06-06 17:37:49 +02008469 if (kvm_read_guest_virt(vcpu, gva, &field_value,
8470 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
Nadav Har'El49f705c2011-05-25 23:08:30 +03008471 kvm_inject_page_fault(vcpu, &e);
8472 return 1;
8473 }
8474 }
8475
8476
Nadav Amit27e6fb52014-06-18 17:19:26 +03008477 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
Jim Mattsonf4160e42018-05-29 09:11:33 -07008478 /*
8479 * If the vCPU supports "VMWRITE to any supported field in the
8480 * VMCS," then the "read-only" fields are actually read/write.
8481 */
8482 if (vmcs_field_readonly(field) &&
8483 !nested_cpu_has_vmwrite_any_field(vcpu)) {
Nadav Har'El49f705c2011-05-25 23:08:30 +03008484 nested_vmx_failValid(vcpu,
8485 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008486 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008487 }
8488
Paolo Bonzinia2ae9df2014-11-04 18:31:19 +01008489 if (vmcs12_write_any(vcpu, field, field_value) < 0) {
Nadav Har'El49f705c2011-05-25 23:08:30 +03008490 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008491 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008492 }
8493
Paolo Bonzini74a497f2017-12-20 13:55:39 +01008494 switch (field) {
8495#define SHADOW_FIELD_RW(x) case x:
8496#include "vmx_shadow_fields.h"
8497 /*
8498 * The fields that can be updated by L1 without a vmexit are
8499 * always updated in the vmcs02, the others go down the slow
8500 * path of prepare_vmcs02.
8501 */
8502 break;
8503 default:
8504 vmx->nested.dirty_vmcs12 = true;
8505 break;
8506 }
8507
Nadav Har'El49f705c2011-05-25 23:08:30 +03008508 nested_vmx_succeed(vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08008509 return kvm_skip_emulated_instruction(vcpu);
Nadav Har'El49f705c2011-05-25 23:08:30 +03008510}
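
/*
 * Editor's illustrative sketch (not part of the original file): the
 * SHADOW_FIELD_RW construct above is an "x-macro" — vmx_shadow_fields.h
 * lists each shadowed read/write field once, and the including site
 * defines the macro to expand the list as needed (here, into "case"
 * labels). A self-contained reduction of the pattern; all ex_* names
 * and the EX_SHADOW_LIST stand-in for the header are hypothetical.
 */
#define EX_SHADOW_LIST(F)	F(0x681e /* GUEST_RIP */) F(0x681c /* GUEST_RSP */)

static inline bool ex_field_is_shadowed(unsigned long field)
{
	switch (field) {
#define EX_CASE(x) case x:
	EX_SHADOW_LIST(EX_CASE)
#undef EX_CASE
		return true;	/* fast path: shadowed in vmcs02 */
	default:
		return false;	/* slow path: mark vmcs12 dirty */
	}
}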

static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	vmx->nested.current_vmptr = vmptr;
	if (enable_shadow_vmcs) {
		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
			      SECONDARY_EXEC_SHADOW_VMCS);
		vmcs_write64(VMCS_LINK_POINTER,
			     __pa(vmx->vmcs01.shadow_vmcs));
		vmx->nested.sync_shadow_vmcs = true;
	}
	vmx->nested.dirty_vmcs12 = true;
}

/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t vmptr;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
		return kvm_skip_emulated_instruction(vcpu);
	}

	if (vmptr == vmx->nested.vmxon_ptr) {
		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
		return kvm_skip_emulated_instruction(vcpu);
	}

	if (vmx->nested.current_vmptr != vmptr) {
		struct vmcs12 *new_vmcs12;
		struct page *page;
		page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
		if (is_error_page(page)) {
			nested_vmx_failInvalid(vcpu);
			return kvm_skip_emulated_instruction(vcpu);
		}
		new_vmcs12 = kmap(page);
		if (new_vmcs12->revision_id != VMCS12_REVISION) {
			kunmap(page);
			kvm_release_page_clean(page);
			nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
			return kvm_skip_emulated_instruction(vcpu);
		}

		nested_release_vmcs12(vmx);
		/*
		 * Load VMCS12 from guest memory since it is not already
		 * cached.
		 */
		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
		kunmap(page);
		kvm_release_page_clean(page);

		set_current_vmptr(vmx, vmptr);
	}

	nested_vmx_succeed(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
}
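
/*
 * Editor's illustrative sketch (not part of the original file): the two
 * architectural validity checks handle_vmptrld() applies above to a
 * candidate VMCS pointer, before the separate comparison against the
 * VMXON pointer. The helper name is hypothetical.
 */
static inline bool ex_vmptr_is_valid(struct kvm_vcpu *vcpu, gpa_t vmptr)
{
	/* Must be 4 KiB aligned and must not exceed the guest's MAXPHYADDR. */
	return PAGE_ALIGNED(vmptr) && !(vmptr >> cpuid_maxphyaddr(vcpu));
}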

/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gva_t vmcs_gva;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qualification,
			vmx_instruction_info, true, &vmcs_gva))
		return 1;
	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
	if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
					(void *)&to_vmx(vcpu)->nested.current_vmptr,
					sizeof(u64), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	nested_vmx_succeed(vcpu);
	return kvm_skip_emulated_instruction(vcpu);
}

/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info, types;
	unsigned long type;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 eptp, gpa;
	} operand;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_EPT) ||
	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	if (type >= 32 || !(types & (1 << type))) {
		nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		return kvm_skip_emulated_instruction(vcpu);
	}

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	switch (type) {
	case VMX_EPT_EXTENT_GLOBAL:
	/*
	 * TODO: track mappings and invalidate
	 * single context requests appropriately
	 */
	case VMX_EPT_EXTENT_CONTEXT:
		kvm_mmu_sync_roots(vcpu);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		nested_vmx_succeed(vcpu);
		break;
	default:
		BUG_ON(1);
		break;
	}

	return kvm_skip_emulated_instruction(vcpu);
}
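
/*
 * Editor's illustrative sketch (not part of the original file): the
 * "type >= 32 || !(types & (1 << type))" test above treats the supported
 * INVEPT extents as a bitmask derived from MSR_IA32_VMX_EPT_VPID_CAP
 * (mask 6 keeps the single-context and global extents). The helper name
 * is hypothetical; the >= 32 guard keeps the shift defined for arbitrary
 * guest-supplied register values.
 */
static inline bool ex_inv_type_supported(u32 types, unsigned long type)
{
	return type < 32 && (types & (1u << type));
}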

static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info;
	unsigned long type, types;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 vpid;
		u64 gla;
	} operand;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.vpid_caps &
			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;

	if (type >= 32 || !(types & (1 << type))) {
		nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		return kvm_skip_emulated_instruction(vcpu);
	}

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	if (operand.vpid >> 16) {
		nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		return kvm_skip_emulated_instruction(vcpu);
	}

	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
		    is_noncanonical_address(operand.gla, vcpu)) {
			nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);
		}
		if (cpu_has_vmx_invvpid_individual_addr() &&
		    vmx->nested.vpid02) {
			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
				vmx->nested.vpid02, operand.gla);
		} else
			__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
		break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
		if (!operand.vpid) {
			nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
			return kvm_skip_emulated_instruction(vcpu);
		}
		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
		break;
	case VMX_VPID_EXTENT_ALL_CONTEXT:
		__vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
		break;
	default:
		WARN_ON_ONCE(1);
		return kvm_skip_emulated_instruction(vcpu);
	}

	nested_vmx_succeed(vcpu);

	return kvm_skip_emulated_instruction(vcpu);
}

static int handle_pml_full(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;

	trace_kvm_pml_full(vcpu->vcpu_id);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	/*
	 * PML buffer FULL happened while executing iret from NMI;
	 * the "blocked by NMI" bit has to be set before the next VM entry.
	 */
	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
	    enable_vnmi &&
	    (exit_qualification & INTR_INFO_UNBLOCK_NMI))
		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				GUEST_INTR_STATE_NMI);

	/*
	 * The PML buffer was already flushed at the beginning of the VMEXIT;
	 * nothing else to do here, and there's no userspace involvement
	 * needed for PML.
	 */
	return 1;
}

static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
	kvm_lapic_expired_hv_timer(vcpu);
	return 1;
}

static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int maxphyaddr = cpuid_maxphyaddr(vcpu);

	/* Check for memory type validity */
	switch (address & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
			return false;
		break;
	default:
		return false;
	}

	/* Only a page-walk length of 4 is valid */
	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
		return false;

	/* Reserved bits should not be set */
	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
		return false;

	/* AD, if set, should be supported */
	if (address & VMX_EPTP_AD_ENABLE_BIT) {
		if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
			return false;
	}

	return true;
}
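
/*
 * Editor's illustrative sketch (not part of the original file): the
 * architectural EPTP layout that valid_ept_address() checks above —
 * bits 2:0 memory type, bits 5:3 page-walk length minus one, bit 6
 * enables accessed/dirty flags, bits 11:7 are reserved, and the upper
 * bits hold the PML4 frame. The struct and helper names are
 * hypothetical, given only to make the bit positions concrete.
 */
struct ex_eptp_fields {
	u8 memtype;		/* bits 2:0, e.g. UC=0 or WB=6 */
	u8 page_walk_len;	/* bits 5:3, stored as length - 1 */
	bool ad_enabled;	/* bit 6 */
};

static inline struct ex_eptp_fields ex_decode_eptp(u64 eptp)
{
	return (struct ex_eptp_fields) {
		.memtype       = eptp & 0x7,
		.page_walk_len = (eptp >> 3) & 0x7,
		.ad_enabled    = !!(eptp & (1ULL << 6)),
	};
}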

static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 address;
	bool accessed_dirty;
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!nested_cpu_has_eptp_switching(vmcs12) ||
	    !nested_cpu_has_ept(vmcs12))
		return 1;

	if (index >= VMFUNC_EPTP_ENTRIES)
		return 1;

	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
				     &address, index * 8, 8))
		return 1;

	accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);

	/*
	 * If the (L2) guest does a vmfunc to the currently
	 * active ept pointer, we don't have to do anything else
	 */
	if (vmcs12->ept_pointer != address) {
		if (!valid_ept_address(vcpu, address))
			return 1;

		kvm_mmu_unload(vcpu);
		mmu->ept_ad = accessed_dirty;
		mmu->base_role.ad_disabled = !accessed_dirty;
		vmcs12->ept_pointer = address;
		/*
		 * TODO: Check what's the correct approach in case
		 * mmu reload fails. Currently, we just let the next
		 * reload potentially fail
		 */
		kvm_mmu_reload(vcpu);
	}

	return 0;
}

static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 function = vcpu->arch.regs[VCPU_REGS_RAX];

	/*
	 * VMFUNC is only supported for nested guests, but we always enable the
	 * secondary control for simplicity; for non-nested mode, fake that we
	 * didn't by injecting #UD.
	 */
	if (!is_guest_mode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcs12 = get_vmcs12(vcpu);
	if ((vmcs12->vm_function_control & (1 << function)) == 0)
		goto fail;

	switch (function) {
	case 0:
		if (nested_vmx_eptp_switching(vcpu, vmcs12))
			goto fail;
		break;
	default:
		goto fail;
	}
	return kvm_skip_emulated_instruction(vcpu);

fail:
	nested_vmx_vmexit(vcpu, vmx->exit_reason,
			  vmcs_read32(VM_EXIT_INTR_INFO),
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
	[EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = handle_cpuid,
	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = handle_halt,
	[EXIT_REASON_INVD]                    = handle_invd,
	[EXIT_REASON_INVLPG]                  = handle_invlpg,
	[EXIT_REASON_RDPMC]                   = handle_rdpmc,
	[EXIT_REASON_VMCALL]                  = handle_vmcall,
	[EXIT_REASON_VMCLEAR]                 = handle_vmclear,
	[EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
	[EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
	[EXIT_REASON_VMPTRST]                 = handle_vmptrst,
	[EXIT_REASON_VMREAD]                  = handle_vmread,
	[EXIT_REASON_VMRESUME]                = handle_vmresume,
	[EXIT_REASON_VMWRITE]                 = handle_vmwrite,
	[EXIT_REASON_VMOFF]                   = handle_vmoff,
	[EXIT_REASON_VMON]                    = handle_vmon,
	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
	[EXIT_REASON_XSETBV]                  = handle_xsetbv,
	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
	[EXIT_REASON_GDTR_IDTR]               = handle_desc,
	[EXIT_REASON_LDTR_TR]                 = handle_desc,
	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
	[EXIT_REASON_MWAIT_INSTRUCTION]       = handle_mwait,
	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
	[EXIT_REASON_INVEPT]                  = handle_invept,
	[EXIT_REASON_INVVPID]                 = handle_invvpid,
	[EXIT_REASON_RDRAND]                  = handle_invalid_op,
	[EXIT_REASON_RDSEED]                  = handle_invalid_op,
	[EXIT_REASON_XSAVES]                  = handle_xsaves,
	[EXIT_REASON_XRSTORS]                 = handle_xrstors,
	[EXIT_REASON_PML_FULL]                = handle_pml_full,
	[EXIT_REASON_VMFUNC]                  = handle_vmfunc,
	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);

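/*
 * Editor's illustrative sketch (not part of the original file): because
 * the table above is indexed directly by the basic exit reason, dispatch
 * reduces to a bounds check plus a NULL check, which is exactly what
 * vmx_handle_exit() does further below. The helper name is hypothetical.
 */
static inline int ex_dispatch_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	if (exit_reason < kvm_vmx_max_exit_handlers &&
	    kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu);
	return -1;	/* caller reports the unexpected exit reason */
}
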
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification;
	gpa_t bitmap, last_bitmap;
	unsigned int port;
	int size;
	u8 b;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}
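
/*
 * Editor's illustrative sketch (not part of the original file): the byte
 * and bit arithmetic used by nested_vmx_exit_handled_io() above. Ports
 * 0x0000-0x7fff live in io_bitmap_a and 0x8000-0xffff in io_bitmap_b,
 * one bit per port; the intercept bit for a port sits at bit (port & 7)
 * of the returned byte address. The helper name is hypothetical.
 */
static inline gpa_t ex_io_bitmap_byte(struct vmcs12 *vmcs12, u16 port)
{
	gpa_t bitmap = port < 0x8000 ? vmcs12->io_bitmap_a
				     : vmcs12->io_bitmap_b;

	return bitmap + (port & 0x7fff) / 8;
}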

/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
 * disinterest in the current event (read or write a specific MSR) by using an
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, u32 exit_reason)
{
	u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}
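
/*
 * Editor's illustrative sketch (not part of the original file): the MSR
 * bitmap quadrant selection performed above. The 4 KiB page holds four
 * 1 KiB bitmaps — read-low, read-high, write-low, write-high — where
 * "high" means MSRs 0xc0000000-0xc0001fff. The helper name is
 * hypothetical; the intercept bit sits at bit (msr & 7) of the byte.
 */
static inline u32 ex_msr_bitmap_offset(u32 msr, bool write)
{
	u32 offset = write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		offset += 1024;
	}
	return offset + msr / 8;
}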

/*
 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
 * intercept (via guest_host_mask etc.) the current event.
 */
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_readl(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if ((vmcs12->cr3_target_count >= 1 &&
					vmcs12->cr3_target_value0 == val) ||
				(vmcs12->cr3_target_count >= 2 &&
					vmcs12->cr3_target_value1 == val) ||
				(vmcs12->cr3_target_count >= 3 &&
					vmcs12->cr3_target_value2 == val) ||
				(vmcs12->cr3_target_count >= 4 &&
					vmcs12->cr3_target_value3 == val))
				return false;
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		if (vmcs12->cr0_guest_host_mask & 0xe &
		    (val ^ vmcs12->cr0_read_shadow))
			return true;
		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
		    !(vmcs12->cr0_read_shadow & 0x1) &&
		    (val & 0x1))
			return true;
		break;
	}
	return false;
}
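
/*
 * Editor's note (not part of the original file): the CR0/CR4 tests above
 * all follow one rule — a bit is owned by L1 iff it is set in
 * crX_guest_host_mask, and a mov-to-crX must reflect to L1 iff the new
 * value differs from crX_read_shadow in any L1-owned bit. A sketch of
 * that predicate, with a hypothetical name:
 */
static inline bool ex_cr_write_intercepted(unsigned long mask,
					   unsigned long shadow,
					   unsigned long val)
{
	return (mask & (val ^ shadow)) != 0;
}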

/*
 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
 * should handle it ourselves in L0 (and then continue L2). Only call this
 * when in is_guest_mode (L2).
 */
static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (vmx->nested.nested_run_pending)
		return false;

	if (unlikely(vmx->fail)) {
		pr_info_ratelimited("%s failed vm entry %x\n", __func__,
				    vmcs_read32(VM_INSTRUCTION_ERROR));
		return true;
	}

	/*
	 * The host physical addresses of some pages of guest memory
	 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
	 * Page). The CPU may write to these pages via their host
	 * physical address while L2 is running, bypassing any
	 * address-translation-based dirty tracking (e.g. EPT write
	 * protection).
	 *
	 * Mark them dirty on every exit from L2 to prevent them from
	 * getting out of sync with dirty tracking.
	 */
	nested_mark_vmcs12_pages_dirty(vcpu);

	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
				vmcs_readl(EXIT_QUALIFICATION),
				vmx->idt_vectoring_info,
				intr_info,
				vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
				KVM_ISA_VMX);

	switch (exit_reason) {
	case EXIT_REASON_EXCEPTION_NMI:
		if (is_nmi(intr_info))
			return false;
		else if (is_page_fault(intr_info))
			return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
		else if (is_no_device(intr_info) &&
			 !(vmcs12->guest_cr0 & X86_CR0_TS))
			return false;
		else if (is_debug(intr_info) &&
			 vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return false;
		else if (is_breakpoint(intr_info) &&
			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		return vmcs12->exception_bitmap &
				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return false;
	case EXIT_REASON_TRIPLE_FAULT:
		return true;
	case EXIT_REASON_PENDING_INTERRUPT:
		return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
	case EXIT_REASON_NMI_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
	case EXIT_REASON_TASK_SWITCH:
		return true;
	case EXIT_REASON_CPUID:
		return true;
	case EXIT_REASON_HLT:
		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
	case EXIT_REASON_INVD:
		return true;
	case EXIT_REASON_INVLPG:
		return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_RDPMC:
		return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
	case EXIT_REASON_RDRAND:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
	case EXIT_REASON_RDSEED:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
	case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
		return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
	case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/*
		 * VMX instructions trap unconditionally. This allows L1 to
		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
		 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return false;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/*
		 * The controls for "virtualize APIC accesses," "APIC-
		 * register virtualization," and "virtual-interrupt
		 * delivery" only come from vmcs12.
		 */
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault()
		 */
		return false;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never directly uses L1's EPT, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return false;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_PREEMPTION_TIMER:
		return false;
	case EXIT_REASON_PML_FULL:
		/* We emulate PML support to L1. */
		return false;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return false;
	default:
		return true;
	}
}

static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits.  For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
	 */
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	*info1 = vmcs_readl(EXIT_QUALIFICATION);
	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}

static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
	if (vmx->pml_pg) {
		__free_page(vmx->pml_pg);
		vmx->pml_pg = NULL;
	}
}

static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 *pml_buf;
	u16 pml_idx;

	pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* Do nothing if PML buffer is empty */
	if (pml_idx == (PML_ENTITY_NUM - 1))
		return;

	/* PML index always points to next available PML buffer entity */
	if (pml_idx >= PML_ENTITY_NUM)
		pml_idx = 0;
	else
		pml_idx++;

	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
		u64 gpa;

		gpa = pml_buf[pml_idx];
		WARN_ON(gpa & (PAGE_SIZE - 1));
		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	}

	/* reset PML index */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
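
/*
 * Editor's note (not part of the original file): GUEST_PML_INDEX counts
 * down from PML_ENTITY_NUM - 1 as the CPU logs dirty GPAs, so the index
 * read above points at the next free slot and entries pml_idx+1 ..
 * PML_ENTITY_NUM-1 are the valid ones (all of them once the index wraps
 * below zero). An illustrative sketch of that arithmetic, with a
 * hypothetical helper name:
 */
static inline unsigned int ex_pml_valid_entries(u16 pml_idx)
{
	if (pml_idx == PML_ENTITY_NUM - 1)	/* nothing logged yet */
		return 0;
	if (pml_idx >= PML_ENTITY_NUM)		/* buffer completely full */
		return PML_ENTITY_NUM;
	return PML_ENTITY_NUM - (pml_idx + 1);
}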

/*
 * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
 * Called before reporting dirty_bitmap to userspace.
 */
static void kvm_flush_pml_buffers(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;
	/*
	 * We only need to kick each vcpu out of guest mode here: the PML
	 * buffer is flushed at the beginning of every VMEXIT, so only vcpus
	 * currently running in guest mode can have unflushed GPAs in their
	 * PML buffers.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}

static void vmx_dump_sel(char *name, uint32_t sel)
{
	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read16(sel),
	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
}

static void vmx_dump_dtsel(char *name, uint32_t limit)
{
	pr_err("%s limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read32(limit),
	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}

static void dump_vmcs(void)
{
	u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
	u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
	u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
	u32 secondary_exec_control = 0;
	unsigned long cr4 = vmcs_readl(GUEST_CR4);
	u64 efer = vmcs_read64(GUEST_IA32_EFER);
	int i, n;

	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);

	pr_err("*** Guest State ***\n");
	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
	       vmcs_readl(CR0_GUEST_HOST_MASK));
	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
	    (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
	{
		pr_err("PDPTR0 = 0x%016llx  PDPTR1 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
		pr_err("PDPTR2 = 0x%016llx  PDPTR3 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
	}
	pr_err("RSP = 0x%016lx  RIP = 0x%016lx\n",
	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
	pr_err("RFLAGS=0x%08lx  DR7 = 0x%016lx\n",
	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(GUEST_SYSENTER_ESP),
	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
	vmx_dump_sel("CS:  ", GUEST_CS_SELECTOR);
	vmx_dump_sel("DS:  ", GUEST_DS_SELECTOR);
	vmx_dump_sel("SS:  ", GUEST_SS_SELECTOR);
	vmx_dump_sel("ES:  ", GUEST_ES_SELECTOR);
	vmx_dump_sel("FS:  ", GUEST_FS_SELECTOR);
	vmx_dump_sel("GS:  ", GUEST_GS_SELECTOR);
	vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
	vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
	vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
	vmx_dump_sel("TR:  ", GUEST_TR_SELECTOR);
	if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
	    (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
		pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
		       efer, vmcs_read64(GUEST_IA32_PAT));
	pr_err("DebugCtl = 0x%016llx  DebugExceptions = 0x%016lx\n",
	       vmcs_read64(GUEST_IA32_DEBUGCTL),
	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
	if (cpu_has_load_perf_global_ctrl &&
	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
	       vmcs_read32(GUEST_ACTIVITY_STATE));
	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
		pr_err("InterruptStatus = %04x\n",
		       vmcs_read16(GUEST_INTR_STATUS));

	pr_err("*** Host State ***\n");
	pr_err("RIP = 0x%016lx  RSP = 0x%016lx\n",
	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
	       vmcs_read16(HOST_TR_SELECTOR));
	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
	       vmcs_readl(HOST_TR_BASE));
	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
	       vmcs_readl(HOST_CR4));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
	       vmcs_read32(HOST_IA32_SYSENTER_CS),
	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
	if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
		pr_err("EFER = 0x%016llx  PAT = 0x%016llx\n",
		       vmcs_read64(HOST_IA32_EFER),
		       vmcs_read64(HOST_IA32_PAT));
	if (cpu_has_load_perf_global_ctrl &&
	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));

	pr_err("*** Control State ***\n");
	pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
	       pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
	pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
	       vmcs_read32(EXCEPTION_BITMAP),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_EXIT_INTR_INFO),
	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
	pr_err("        reason=%08x qualification=%016lx\n",
	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
		pr_err("TSC Multiplier = 0x%016llx\n",
		       vmcs_read64(TSC_MULTIPLIER));
	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
		pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
	n = vmcs_read32(CR3_TARGET_COUNT);
	for (i = 0; i + 1 < n; i += 4)
		pr_err("CR3 target%u=%016lx target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
		       i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
	if (i < n)
		pr_err("CR3 target%u=%016lx\n",
		       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
		pr_err("PLE Gap=%08x Window=%08x\n",
		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
		pr_err("Virtual processor ID = 0x%04x\n",
		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
}
9549
Avi Kivity6aa8b732006-12-10 02:21:36 -08009550/*
9551 * The guest has exited. See if we can fix it or if we need userspace
9552 * assistance.
9553 */
Avi Kivity851ba692009-08-24 11:10:17 +03009554static int vmx_handle_exit(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08009555{
Avi Kivity29bd8a72007-09-10 17:27:03 +03009556 struct vcpu_vmx *vmx = to_vmx(vcpu);
Andi Kleena0861c02009-06-08 17:37:09 +08009557 u32 exit_reason = vmx->exit_reason;
Avi Kivity1155f762007-11-22 11:30:47 +02009558 u32 vectoring_info = vmx->idt_vectoring_info;
Avi Kivity29bd8a72007-09-10 17:27:03 +03009559
Paolo Bonzini8b89fe12015-12-10 18:37:32 +01009560 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
9561
Kai Huang843e4332015-01-28 10:54:28 +08009562 /*
9563 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
9564 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
9565 * querying dirty_bitmap, we only need to kick all vcpus out of guest
9566 * mode as if vcpus is in root mode, the PML buffer must has been
9567 * flushed already.
9568 */
9569 if (enable_pml)
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02009570 vmx_flush_pml_buffer(vcpu);
Kai Huang843e4332015-01-28 10:54:28 +08009571
Mohammed Gamal80ced182009-09-01 12:48:18 +02009572 /* If guest state is invalid, start emulating */
Gleb Natapov14168782013-01-21 15:36:49 +02009573 if (vmx->emulation_required)
Mohammed Gamal80ced182009-09-01 12:48:18 +02009574 return handle_invalid_guest_state(vcpu);
Guillaume Thouvenin1d5a4d92008-10-29 09:39:42 +01009575
Paolo Bonzini7313c692017-07-27 10:31:25 +02009576 if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
9577 return nested_vmx_reflect_vmexit(vcpu, exit_reason);
Nadav Har'El644d7112011-05-25 23:12:35 +03009578
Mohammed Gamal51207022010-05-31 22:40:54 +03009579 if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
Paolo Bonzini4eb64dc2015-04-30 12:57:28 +02009580 dump_vmcs();
Mohammed Gamal51207022010-05-31 22:40:54 +03009581 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
9582 vcpu->run->fail_entry.hardware_entry_failure_reason
9583 = exit_reason;
9584 return 0;
9585 }
9586
Avi Kivity29bd8a72007-09-10 17:27:03 +03009587 if (unlikely(vmx->fail)) {
Avi Kivity851ba692009-08-24 11:10:17 +03009588 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
9589 vcpu->run->fail_entry.hardware_entry_failure_reason
Avi Kivity29bd8a72007-09-10 17:27:03 +03009590 = vmcs_read32(VM_INSTRUCTION_ERROR);
9591 return 0;
9592 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08009593
Xiao Guangrongb9bf6882012-10-17 13:46:52 +08009594 /*
9595 * Note:
9596 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by
9597 * delivery event since it indicates guest is accessing MMIO.
9598 * The vm-exit can be triggered again after return to guest that
9599 * will cause infinite loop.
9600 */
Mike Dayd77c26f2007-10-08 09:02:08 -04009601 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
Sheng Yang14394422008-04-28 12:24:45 +08009602 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
Jan Kiszka60637aa2008-09-26 09:30:47 +02009603 exit_reason != EXIT_REASON_EPT_VIOLATION &&
Cao, Leib244c9f2016-07-15 13:54:04 +00009604 exit_reason != EXIT_REASON_PML_FULL &&
Xiao Guangrongb9bf6882012-10-17 13:46:52 +08009605 exit_reason != EXIT_REASON_TASK_SWITCH)) {
9606 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
9607 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
Paolo Bonzini70bcd702017-07-05 12:38:06 +02009608 vcpu->run->internal.ndata = 3;
Xiao Guangrongb9bf6882012-10-17 13:46:52 +08009609 vcpu->run->internal.data[0] = vectoring_info;
9610 vcpu->run->internal.data[1] = exit_reason;
Paolo Bonzini70bcd702017-07-05 12:38:06 +02009611 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
9612 if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
9613 vcpu->run->internal.ndata++;
9614 vcpu->run->internal.data[3] =
9615 vmcs_read64(GUEST_PHYSICAL_ADDRESS);
9616 }
Xiao Guangrongb9bf6882012-10-17 13:46:52 +08009617 return 0;
9618 }
Jan Kiszka3b86cd92008-09-26 09:30:57 +02009619
Paolo Bonzinid02fcf52017-11-06 13:31:13 +01009620 if (unlikely(!enable_vnmi &&
Paolo Bonzini8a1b4392017-11-06 13:31:12 +01009621 vmx->loaded_vmcs->soft_vnmi_blocked)) {
9622 if (vmx_interrupt_allowed(vcpu)) {
9623 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
9624 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
9625 vcpu->arch.nmi_pending) {
9626 /*
9627			 * This CPU doesn't let us detect the end of an
9628			 * NMI-blocked window if the guest runs with IRQs
9629			 * disabled. So we pull the trigger after 1 s of
9630			 * futile waiting, but inform the user about this.
9631 */
9632 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
9633 "state on VCPU %d after 1 s timeout\n",
9634 __func__, vcpu->vcpu_id);
9635 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
9636 }
9637 }
9638
Avi Kivity6aa8b732006-12-10 02:21:36 -08009639 if (exit_reason < kvm_vmx_max_exit_handlers
9640 && kvm_vmx_exit_handlers[exit_reason])
Avi Kivity851ba692009-08-24 11:10:17 +03009641 return kvm_vmx_exit_handlers[exit_reason](vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08009642 else {
Radim Krčmář6c6c5e02017-01-13 18:59:04 +01009643 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
9644 exit_reason);
Michael S. Tsirkin2bc19dc2014-09-18 16:21:16 +03009645 kvm_queue_exception(vcpu, UD_VECTOR);
9646 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08009647 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08009648}
9649
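/*
 * Illustrative sketch (not from the original file): the tail of
 * vmx_handle_exit() above dispatches through a table of handler function
 * pointers indexed by the basic exit reason, guarded by a bounds check and
 * a NULL check. A minimal stand-alone version of that pattern, with
 * hypothetical handler names:
 */
typedef int (*demo_exit_handler_t)(struct kvm_vcpu *vcpu);

static int demo_handle_hlt(struct kvm_vcpu *vcpu)
{
	return 1;	/* 1 == resume the guest */
}

static demo_exit_handler_t demo_exit_handlers[] = {
	[EXIT_REASON_HLT] = demo_handle_hlt,
};

static int demo_dispatch_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	if (exit_reason < ARRAY_SIZE(demo_exit_handlers) &&
	    demo_exit_handlers[exit_reason])
		return demo_exit_handlers[exit_reason](vcpu);
	return 0;	/* 0 == exit to userspace */
}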
Paolo Bonzinia47dd5f2018-07-02 12:47:38 +02009650/*
9651 * Software-based L1D cache flush, used when microcode providing the
9652 * cache control MSR is not loaded.
9653 *
9654 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
9655 * flushing it requires reading in 64 KiB because the replacement algorithm
9656 * is not exactly LRU. This could be sized at runtime via topology
9657 * information, but as all relevant affected CPUs have a 32 KiB L1D cache
9658 * there is no point in doing so.
9659 */
9660#define L1D_CACHE_ORDER 4
9661static void *vmx_l1d_flush_pages;
9662
Paolo Bonzinic595cee2018-07-02 13:07:14 +02009663static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
Paolo Bonzinia47dd5f2018-07-02 12:47:38 +02009664{
9665 int size = PAGE_SIZE << L1D_CACHE_ORDER;
Paolo Bonzinic595cee2018-07-02 13:07:14 +02009666
9667 /*
Thomas Gleixner2f055942018-07-13 16:23:17 +02009668	 * This code is only executed when the flush mode is 'cond' or
9669	 * 'always'.
Konrad Rzeszutek Wilk390d9752018-06-28 17:10:36 -04009670 *
Thomas Gleixner2f055942018-07-13 16:23:17 +02009671 * If 'flush always', keep the flush bit set, otherwise clear
9672 * it. The flush bit gets set again either from vcpu_run() or from
9673 * one of the unsafe VMEXIT handlers.
Paolo Bonzinic595cee2018-07-02 13:07:14 +02009674 */
Thomas Gleixner4c6523e2018-07-13 16:23:20 +02009675 if (static_branch_unlikely(&vmx_l1d_flush_always))
9676 vcpu->arch.l1tf_flush_l1d = true;
9677 else
9678 vcpu->arch.l1tf_flush_l1d = false;
Paolo Bonzinic595cee2018-07-02 13:07:14 +02009679
9680 vcpu->stat.l1d_flush++;
Paolo Bonzinia47dd5f2018-07-02 12:47:38 +02009681
Paolo Bonzini3fa045b2018-07-02 13:03:48 +02009682 if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
9683 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
9684 return;
9685 }
9686
Paolo Bonzinia47dd5f2018-07-02 12:47:38 +02009687 asm volatile(
9688 /* First ensure the pages are in the TLB */
9689 "xorl %%eax, %%eax\n"
9690 ".Lpopulate_tlb:\n\t"
9691 "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
9692 "addl $4096, %%eax\n\t"
9693 "cmpl %%eax, %[size]\n\t"
9694 "jne .Lpopulate_tlb\n\t"
9695 "xorl %%eax, %%eax\n\t"
9696 "cpuid\n\t"
9697 /* Now fill the cache */
9698 "xorl %%eax, %%eax\n"
9699 ".Lfill_cache:\n"
9700 "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
9701 "addl $64, %%eax\n\t"
9702 "cmpl %%eax, %[size]\n\t"
9703 "jne .Lfill_cache\n\t"
9704 "lfence\n"
9705 :: [empty_zp] "r" (vmx_l1d_flush_pages),
9706 [size] "r" (size)
9707 : "eax", "ebx", "ecx", "edx");
9708}
9709
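/*
 * Illustrative sketch (not from the original file): the asm sequence in
 * vmx_l1d_flush() above, expressed as rough C for readability. First touch
 * one byte per 4 KiB page so the whole buffer is TLB-resident (the real
 * code then serialises with CPUID), then read one byte per 64-byte cache
 * line to displace the L1D contents. The real implementation stays in asm
 * so the compiler cannot reorder or elide the loads.
 */
static void demo_l1d_flush_sw(const void *flush_pages, int size)
{
	const volatile char *p = flush_pages;
	int i;

	/* Ensure the pages are in the TLB so the fill loop never faults. */
	for (i = 0; i < size; i += PAGE_SIZE)
		(void)p[i];

	/* Now fill the cache, one read per cache line. */
	for (i = 0; i < size; i += 64)
		(void)p[i];
}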
Gleb Natapov95ba8273132009-04-21 17:45:08 +03009710static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
Yang, Sheng6e5d8652007-09-12 18:03:11 +08009711{
Wanpeng Lia7c0b072014-08-21 19:46:50 +08009712 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
9713
9714 if (is_guest_mode(vcpu) &&
9715 nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
9716 return;
9717
Gleb Natapov95ba8273132009-04-21 17:45:08 +03009718 if (irr == -1 || tpr < irr) {
Yang, Sheng6e5d8652007-09-12 18:03:11 +08009719 vmcs_write32(TPR_THRESHOLD, 0);
9720 return;
9721 }
9722
Gleb Natapov95ba8273132009-04-21 17:45:08 +03009723 vmcs_write32(TPR_THRESHOLD, irr);
Yang, Sheng6e5d8652007-09-12 18:03:11 +08009724}
9725
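/*
 * Illustrative sketch (not from the original file): the policy encoded in
 * update_cr8_intercept() above, where tpr and irr are 4-bit CR8-style
 * priority values. If nothing is pending (irr == -1) or the pending
 * interrupt already outranks TPR (tpr < irr), no threshold is needed;
 * otherwise the threshold is set to irr so the CPU exits as soon as the
 * guest lowers TPR below the pending priority.
 */
static u32 demo_tpr_threshold(int tpr, int irr)
{
	return (irr == -1 || tpr < irr) ? 0 : irr;
}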
Jim Mattson8d860bb2018-05-09 16:56:05 -04009726static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
Yang Zhang8d146952013-01-25 10:18:50 +08009727{
9728 u32 sec_exec_control;
9729
Jim Mattson8d860bb2018-05-09 16:56:05 -04009730 if (!lapic_in_kernel(vcpu))
9731 return;
9732
Radim Krčmářdccbfcf2016-08-08 20:16:23 +02009733 /* Postpone execution until vmcs01 is the current VMCS. */
9734 if (is_guest_mode(vcpu)) {
Jim Mattson8d860bb2018-05-09 16:56:05 -04009735 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
Radim Krčmářdccbfcf2016-08-08 20:16:23 +02009736 return;
9737 }
9738
Paolo Bonzini35754c92015-07-29 12:05:37 +02009739 if (!cpu_need_tpr_shadow(vcpu))
Yang Zhang8d146952013-01-25 10:18:50 +08009740 return;
9741
9742 sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
Jim Mattson8d860bb2018-05-09 16:56:05 -04009743 sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
9744 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
Yang Zhang8d146952013-01-25 10:18:50 +08009745
Jim Mattson8d860bb2018-05-09 16:56:05 -04009746 switch (kvm_get_apic_mode(vcpu)) {
9747 case LAPIC_MODE_INVALID:
9748 WARN_ONCE(true, "Invalid local APIC state");
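		/* fall through */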
9749 case LAPIC_MODE_DISABLED:
9750 break;
9751 case LAPIC_MODE_XAPIC:
9752 if (flexpriority_enabled) {
9753 sec_exec_control |=
9754 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
9755 vmx_flush_tlb(vcpu, true);
9756 }
9757 break;
9758 case LAPIC_MODE_X2APIC:
9759 if (cpu_has_vmx_virtualize_x2apic_mode())
9760 sec_exec_control |=
9761 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
9762 break;
Yang Zhang8d146952013-01-25 10:18:50 +08009763 }
9764 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
9765
Paolo Bonzini904e14f2018-01-16 16:51:18 +01009766 vmx_update_msr_bitmap(vcpu);
Yang Zhang8d146952013-01-25 10:18:50 +08009767}
9768
Tang Chen38b99172014-09-24 15:57:54 +08009769static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
9770{
Jim Mattsonab5df312018-05-09 17:02:03 -04009771 if (!is_guest_mode(vcpu)) {
Tang Chen38b99172014-09-24 15:57:54 +08009772 vmcs_write64(APIC_ACCESS_ADDR, hpa);
Junaid Shahida468f2d2018-04-26 13:09:50 -07009773 vmx_flush_tlb(vcpu, true);
Jim Mattsonfb6c8192017-03-16 13:53:59 -07009774 }
Tang Chen38b99172014-09-24 15:57:54 +08009775}
9776
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02009777static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
Yang Zhangc7c9c562013-01-25 10:18:51 +08009778{
9779 u16 status;
9780 u8 old;
9781
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02009782 if (max_isr == -1)
9783 max_isr = 0;
Yang Zhangc7c9c562013-01-25 10:18:51 +08009784
9785 status = vmcs_read16(GUEST_INTR_STATUS);
9786 old = status >> 8;
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02009787 if (max_isr != old) {
Yang Zhangc7c9c562013-01-25 10:18:51 +08009788 status &= 0xff;
Paolo Bonzini67c9ddd2016-05-10 17:01:23 +02009789 status |= max_isr << 8;
Yang Zhangc7c9c562013-01-25 10:18:51 +08009790 vmcs_write16(GUEST_INTR_STATUS, status);
9791 }
9792}
9793
9794static void vmx_set_rvi(int vector)
9795{
9796 u16 status;
9797 u8 old;
9798
Wei Wang4114c272014-11-05 10:53:43 +08009799 if (vector == -1)
9800 vector = 0;
9801
Yang Zhangc7c9c562013-01-25 10:18:51 +08009802 status = vmcs_read16(GUEST_INTR_STATUS);
9803 old = (u8)status & 0xff;
9804 if ((u8)vector != old) {
9805 status &= ~0xff;
9806 status |= (u8)vector;
9807 vmcs_write16(GUEST_INTR_STATUS, status);
9808 }
9809}
9810
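/*
 * Illustrative sketch (not from the original file): GUEST_INTR_STATUS is a
 * 16-bit VMCS field that packs two values, the Requesting Virtual
 * Interrupt (RVI) in the low byte and the Servicing Virtual Interrupt
 * (SVI) in the high byte. vmx_set_rvi() and vmx_hwapic_isr_update() above
 * each rewrite one half; hypothetical helpers showing the packing:
 */
static inline u16 demo_pack_intr_status(u8 rvi, u8 svi)
{
	return ((u16)svi << 8) | rvi;
}

static inline u8 demo_intr_status_rvi(u16 status)
{
	return status & 0xff;
}

static inline u8 demo_intr_status_svi(u16 status)
{
	return status >> 8;
}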
9811static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
9812{
Liran Alon851c1a182017-12-24 18:12:56 +02009813 /*
9814	 * When running L2, updating RVI is only relevant if
9815	 * vmcs12 has virtual-interrupt-delivery enabled.
9816	 * However, that can be enabled only when L1 also
9817	 * intercepts external interrupts, in which case
9818	 * we should not update vmcs02's RVI but instead
9819	 * intercept the interrupt. Therefore, do nothing when running L2.
9820 */
9821 if (!is_guest_mode(vcpu))
Wanpeng Li963fee12014-07-17 19:03:00 +08009822 vmx_set_rvi(max_irr);
Yang Zhangc7c9c562013-01-25 10:18:51 +08009823}
9824
Paolo Bonzini76dfafd52016-12-19 17:17:11 +01009825static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
Paolo Bonzini810e6de2016-12-19 13:05:46 +01009826{
9827 struct vcpu_vmx *vmx = to_vmx(vcpu);
Paolo Bonzini76dfafd52016-12-19 17:17:11 +01009828 int max_irr;
Liran Alonf27a85c2017-12-24 18:12:55 +02009829 bool max_irr_updated;
Paolo Bonzini810e6de2016-12-19 13:05:46 +01009830
Paolo Bonzini76dfafd52016-12-19 17:17:11 +01009831 WARN_ON(!vcpu->arch.apicv_active);
9832 if (pi_test_on(&vmx->pi_desc)) {
9833 pi_clear_on(&vmx->pi_desc);
9834 /*
9835 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
9836 * But on x86 this is just a compiler barrier anyway.
9837 */
9838 smp_mb__after_atomic();
Liran Alonf27a85c2017-12-24 18:12:55 +02009839 max_irr_updated =
9840 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
9841
9842 /*
9843 * If we are running L2 and L1 has a new pending interrupt
9844 * which can be injected, we should re-evaluate
9845 * what should be done with this new L1 interrupt.
Liran Alon851c1a182017-12-24 18:12:56 +02009846 * If L1 intercepts external-interrupts, we should
9847 * exit from L2 to L1. Otherwise, interrupt should be
9848 * delivered directly to L2.
Liran Alonf27a85c2017-12-24 18:12:55 +02009849 */
Liran Alon851c1a182017-12-24 18:12:56 +02009850 if (is_guest_mode(vcpu) && max_irr_updated) {
9851 if (nested_exit_on_intr(vcpu))
9852 kvm_vcpu_exiting_guest_mode(vcpu);
9853 else
9854 kvm_make_request(KVM_REQ_EVENT, vcpu);
9855 }
Paolo Bonzini76dfafd52016-12-19 17:17:11 +01009856 } else {
9857 max_irr = kvm_lapic_find_highest_irr(vcpu);
9858 }
9859 vmx_hwapic_irr_update(vcpu, max_irr);
9860 return max_irr;
Paolo Bonzini810e6de2016-12-19 13:05:46 +01009861}
9862
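/*
 * Illustrative sketch (not from the original file): the posted-interrupt
 * handoff performed by vmx_sync_pir_to_irr() above. Other agents (IOMMU,
 * other CPUs) set bits in the 256-bit PIR and then PIR.ON; the vCPU side
 * clears ON, merges PIR into the vAPIC IRR, and recomputes the highest
 * pending vector. Simplified, non-atomic types for illustration only:
 */
struct demo_pi_desc {
	u64 pir[4];	/* one bit per interrupt vector, 0..255 */
	bool on;	/* outstanding notification */
};

static int demo_highest_pir_vector(const struct demo_pi_desc *pi)
{
	int i;

	for (i = 3; i >= 0; i--)
		if (pi->pir[i])
			return i * 64 + fls64(pi->pir[i]) - 1;
	return -1;	/* nothing pending */
}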
Andrey Smetanin63086302015-11-10 15:36:32 +03009863static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
Yang Zhangc7c9c562013-01-25 10:18:51 +08009864{
Andrey Smetanind62caab2015-11-10 15:36:33 +03009865 if (!kvm_vcpu_apicv_active(vcpu))
Yang Zhang3d81bc72013-04-11 19:25:13 +08009866 return;
9867
Yang Zhangc7c9c562013-01-25 10:18:51 +08009868 vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
9869 vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
9870 vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
9871 vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
9872}
9873
Paolo Bonzini967235d2016-12-19 14:03:45 +01009874static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
9875{
9876 struct vcpu_vmx *vmx = to_vmx(vcpu);
9877
9878 pi_clear_on(&vmx->pi_desc);
9879 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
9880}
9881
Avi Kivity51aa01d2010-07-20 14:31:20 +03009882static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
Avi Kivitycf393f72008-07-01 16:20:21 +03009883{
Jim Mattson48ae0fb2017-05-22 09:48:33 -07009884 u32 exit_intr_info = 0;
9885 u16 basic_exit_reason = (u16)vmx->exit_reason;
Avi Kivity00eba012011-03-07 17:24:54 +02009886
Jim Mattson48ae0fb2017-05-22 09:48:33 -07009887 if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
9888 || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
Avi Kivity00eba012011-03-07 17:24:54 +02009889 return;
9890
Jim Mattson48ae0fb2017-05-22 09:48:33 -07009891 if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
9892 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
9893 vmx->exit_intr_info = exit_intr_info;
Andi Kleena0861c02009-06-08 17:37:09 +08009894
Wanpeng Li1261bfa2017-07-13 18:30:40 -07009895	/* If the exit was due to a page fault, check for async PF */
9896 if (is_page_fault(exit_intr_info))
9897 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
9898
Andi Kleena0861c02009-06-08 17:37:09 +08009899 /* Handle machine checks before interrupts are enabled */
Jim Mattson48ae0fb2017-05-22 09:48:33 -07009900 if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
9901 is_machine_check(exit_intr_info))
Andi Kleena0861c02009-06-08 17:37:09 +08009902 kvm_machine_check();
9903
Gleb Natapov20f65982009-05-11 13:35:55 +03009904 /* We need to handle NMIs before interrupts are enabled */
Jim Mattsonef85b672016-12-12 11:01:37 -08009905 if (is_nmi(exit_intr_info)) {
Andi Kleendd60d212017-07-25 17:20:32 -07009906 kvm_before_interrupt(&vmx->vcpu);
Gleb Natapov20f65982009-05-11 13:35:55 +03009907 asm("int $2");
Andi Kleendd60d212017-07-25 17:20:32 -07009908 kvm_after_interrupt(&vmx->vcpu);
Zhang, Yanminff9d07a2010-04-19 13:32:45 +08009909 }
Avi Kivity51aa01d2010-07-20 14:31:20 +03009910}
Gleb Natapov20f65982009-05-11 13:35:55 +03009911
Yang Zhanga547c6d2013-04-11 19:25:10 +08009912static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
9913{
9914 u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
9915
Yang Zhanga547c6d2013-04-11 19:25:10 +08009916 if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
9917 == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
9918 unsigned int vector;
9919 unsigned long entry;
9920 gate_desc *desc;
9921 struct vcpu_vmx *vmx = to_vmx(vcpu);
9922#ifdef CONFIG_X86_64
9923 unsigned long tmp;
9924#endif
9925
9926 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
9927 desc = (gate_desc *)vmx->host_idt_base + vector;
Thomas Gleixner64b163f2017-08-28 08:47:37 +02009928 entry = gate_offset(desc);
Yang Zhanga547c6d2013-04-11 19:25:10 +08009929 asm volatile(
9930#ifdef CONFIG_X86_64
9931 "mov %%" _ASM_SP ", %[sp]\n\t"
9932 "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
9933 "push $%c[ss]\n\t"
9934 "push %[sp]\n\t"
9935#endif
9936 "pushf\n\t"
Yang Zhanga547c6d2013-04-11 19:25:10 +08009937 __ASM_SIZE(push) " $%c[cs]\n\t"
Peter Zijlstrac940a3f2018-01-25 10:58:14 +01009938 CALL_NOSPEC
Yang Zhanga547c6d2013-04-11 19:25:10 +08009939 :
9940#ifdef CONFIG_X86_64
Chris J Arges3f62de52016-01-22 15:44:38 -06009941 [sp]"=&r"(tmp),
Yang Zhanga547c6d2013-04-11 19:25:10 +08009942#endif
Josh Poimboeuff5caf622017-09-20 16:24:33 -05009943 ASM_CALL_CONSTRAINT
Yang Zhanga547c6d2013-04-11 19:25:10 +08009944 :
Peter Zijlstrac940a3f2018-01-25 10:58:14 +01009945 THUNK_TARGET(entry),
Yang Zhanga547c6d2013-04-11 19:25:10 +08009946 [ss]"i"(__KERNEL_DS),
9947 [cs]"i"(__KERNEL_CS)
9948 );
Paolo Bonzinic595cee2018-07-02 13:07:14 +02009949 vcpu->arch.l1tf_flush_l1d = true;
Paolo Bonzinif2485b32016-06-15 15:23:11 +02009950 }
Yang Zhanga547c6d2013-04-11 19:25:10 +08009951}
Josh Poimboeufc207aee2017-06-28 10:11:06 -05009952STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
Yang Zhanga547c6d2013-04-11 19:25:10 +08009953
Tom Lendackybc226f02018-05-10 22:06:39 +02009954static bool vmx_has_emulated_msr(int index)
Paolo Bonzini6d396b52015-04-01 14:25:33 +02009955{
Tom Lendackybc226f02018-05-10 22:06:39 +02009956 switch (index) {
9957 case MSR_IA32_SMBASE:
9958 /*
9959 * We cannot do SMM unless we can run the guest in big
9960 * real mode.
9961 */
9962 return enable_unrestricted_guest || emulate_invalid_guest_state;
9963 case MSR_AMD64_VIRT_SPEC_CTRL:
9964 /* This is AMD only. */
9965 return false;
9966 default:
9967 return true;
9968 }
Paolo Bonzini6d396b52015-04-01 14:25:33 +02009969}
9970
Liu, Jinsongda8999d2014-02-24 10:55:46 +00009971static bool vmx_mpx_supported(void)
9972{
9973 return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
9974 (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
9975}
9976
Wanpeng Li55412b22014-12-02 19:21:30 +08009977static bool vmx_xsaves_supported(void)
9978{
9979 return vmcs_config.cpu_based_2nd_exec_ctrl &
9980 SECONDARY_EXEC_XSAVES;
9981}
9982
Avi Kivity51aa01d2010-07-20 14:31:20 +03009983static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
9984{
Avi Kivityc5ca8e52011-03-07 17:37:37 +02009985 u32 exit_intr_info;
Avi Kivity51aa01d2010-07-20 14:31:20 +03009986 bool unblock_nmi;
9987 u8 vector;
9988 bool idtv_info_valid;
9989
9990 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
Gleb Natapov20f65982009-05-11 13:35:55 +03009991
Paolo Bonzinid02fcf52017-11-06 13:31:13 +01009992 if (enable_vnmi) {
Paolo Bonzini8a1b4392017-11-06 13:31:12 +01009993 if (vmx->loaded_vmcs->nmi_known_unmasked)
9994 return;
9995 /*
9996 * Can't use vmx->exit_intr_info since we're not sure what
9997 * the exit reason is.
9998 */
9999 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
10000 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
10001 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
10002 /*
10003 * SDM 3: 27.7.1.2 (September 2008)
10004 * Re-set bit "block by NMI" before VM entry if vmexit caused by
10005 * a guest IRET fault.
10006 * SDM 3: 23.2.2 (September 2008)
10007 * Bit 12 is undefined in any of the following cases:
10008 * If the VM exit sets the valid bit in the IDT-vectoring
10009 * information field.
10010 * If the VM exit is due to a double fault.
10011 */
10012 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
10013 vector != DF_VECTOR && !idtv_info_valid)
10014 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
10015 GUEST_INTR_STATE_NMI);
10016 else
10017 vmx->loaded_vmcs->nmi_known_unmasked =
10018 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
10019 & GUEST_INTR_STATE_NMI);
10020 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
10021 vmx->loaded_vmcs->vnmi_blocked_time +=
10022 ktime_to_ns(ktime_sub(ktime_get(),
10023 vmx->loaded_vmcs->entry_time));
Avi Kivity51aa01d2010-07-20 14:31:20 +030010024}
10025
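/*
 * Illustrative sketch (not from the original file): the SDM rule applied
 * in vmx_recover_nmi_blocking() above, condensed. "Blocking by NMI" is
 * re-asserted in the guest interruptibility state only when the exit
 * reported unblocked-by-IRET, the IDT-vectoring valid bit is clear, and
 * the vector is not #DF:
 */
static bool demo_should_reblock_nmi(u32 exit_intr_info, bool idtv_valid,
				    u8 vector)
{
	return (exit_intr_info & INTR_INFO_VALID_MASK) &&
	       (exit_intr_info & INTR_INFO_UNBLOCK_NMI) &&
	       vector != DF_VECTOR && !idtv_valid;
}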
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010026static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
Avi Kivity83422e12010-07-20 14:43:23 +030010027 u32 idt_vectoring_info,
10028 int instr_len_field,
10029 int error_code_field)
Avi Kivity51aa01d2010-07-20 14:31:20 +030010030{
Avi Kivity51aa01d2010-07-20 14:31:20 +030010031 u8 vector;
10032 int type;
10033 bool idtv_info_valid;
10034
10035 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
Avi Kivity668f6122008-07-02 09:28:55 +030010036
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010037 vcpu->arch.nmi_injected = false;
10038 kvm_clear_exception_queue(vcpu);
10039 kvm_clear_interrupt_queue(vcpu);
Gleb Natapov37b96e92009-03-30 16:03:13 +030010040
10041 if (!idtv_info_valid)
10042 return;
10043
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010044 kvm_make_request(KVM_REQ_EVENT, vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +030010045
Avi Kivity668f6122008-07-02 09:28:55 +030010046 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
10047 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
Gleb Natapov37b96e92009-03-30 16:03:13 +030010048
Gleb Natapov64a7ec02009-03-30 16:03:29 +030010049 switch (type) {
Gleb Natapov37b96e92009-03-30 16:03:13 +030010050 case INTR_TYPE_NMI_INTR:
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010051 vcpu->arch.nmi_injected = true;
Avi Kivity668f6122008-07-02 09:28:55 +030010052 /*
Gleb Natapov7b4a25c2009-03-30 16:03:08 +030010053 * SDM 3: 27.7.1.2 (September 2008)
Gleb Natapov37b96e92009-03-30 16:03:13 +030010054 * Clear bit "block by NMI" before VM entry if a NMI
10055 * delivery faulted.
Avi Kivity668f6122008-07-02 09:28:55 +030010056 */
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010057 vmx_set_nmi_mask(vcpu, false);
Gleb Natapov37b96e92009-03-30 16:03:13 +030010058 break;
Gleb Natapov37b96e92009-03-30 16:03:13 +030010059 case INTR_TYPE_SOFT_EXCEPTION:
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010060 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
Gleb Natapov66fd3f72009-05-11 13:35:50 +030010061 /* fall through */
10062 case INTR_TYPE_HARD_EXCEPTION:
Avi Kivity35920a32008-07-03 14:50:12 +030010063 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
Avi Kivity83422e12010-07-20 14:43:23 +030010064 u32 err = vmcs_read32(error_code_field);
Gleb Natapov851eb6672013-09-25 12:51:34 +030010065 kvm_requeue_exception_e(vcpu, vector, err);
Avi Kivity35920a32008-07-03 14:50:12 +030010066 } else
Gleb Natapov851eb6672013-09-25 12:51:34 +030010067 kvm_requeue_exception(vcpu, vector);
Gleb Natapov37b96e92009-03-30 16:03:13 +030010068 break;
Gleb Natapov66fd3f72009-05-11 13:35:50 +030010069 case INTR_TYPE_SOFT_INTR:
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010070 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
Gleb Natapov66fd3f72009-05-11 13:35:50 +030010071 /* fall through */
Gleb Natapov37b96e92009-03-30 16:03:13 +030010072 case INTR_TYPE_EXT_INTR:
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010073 kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
Gleb Natapov37b96e92009-03-30 16:03:13 +030010074 break;
10075 default:
10076 break;
Avi Kivityf7d92382008-07-03 16:14:28 +030010077 }
Avi Kivitycf393f72008-07-01 16:20:21 +030010078}
10079
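/*
 * Illustrative sketch (not from the original file): layout of the
 * IDT-vectoring information field decoded by __vmx_complete_interrupts()
 * above. Bits 7:0 hold the vector, bits 10:8 the event type, bit 11
 * "error code valid" and bit 31 "valid":
 */
static void demo_decode_idt_vectoring(u32 info)
{
	bool valid   = info & VECTORING_INFO_VALID_MASK;
	u8 vector    = info & VECTORING_INFO_VECTOR_MASK;
	u32 type     = info & VECTORING_INFO_TYPE_MASK;
	bool has_err = info & VECTORING_INFO_DELIVER_CODE_MASK;

	if (valid)
		pr_debug("vector %u, type %#x, error code valid %d\n",
			 vector, type, has_err);
}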
Avi Kivity83422e12010-07-20 14:43:23 +030010080static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
10081{
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010082 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
Avi Kivity83422e12010-07-20 14:43:23 +030010083 VM_EXIT_INSTRUCTION_LEN,
10084 IDT_VECTORING_ERROR_CODE);
10085}
10086
Avi Kivityb463a6f2010-07-20 15:06:17 +030010087static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
10088{
Jan Kiszka3ab66e82013-02-20 14:03:24 +010010089 __vmx_complete_interrupts(vcpu,
Avi Kivityb463a6f2010-07-20 15:06:17 +030010090 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
10091 VM_ENTRY_INSTRUCTION_LEN,
10092 VM_ENTRY_EXCEPTION_ERROR_CODE);
10093
10094 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
10095}
10096
Gleb Natapovd7cd9792011-10-05 14:01:23 +020010097static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
10098{
10099 int i, nr_msrs;
10100 struct perf_guest_switch_msr *msrs;
10101
10102 msrs = perf_guest_get_msrs(&nr_msrs);
10103
10104 if (!msrs)
10105 return;
10106
10107 for (i = 0; i < nr_msrs; i++)
10108 if (msrs[i].host == msrs[i].guest)
10109 clear_atomic_switch_msr(vmx, msrs[i].msr);
10110 else
10111 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
Konrad Rzeszutek Wilk989e3992018-06-20 22:01:22 -040010112 msrs[i].host, false);
Gleb Natapovd7cd9792011-10-05 14:01:23 +020010113}
10114
Jiang Biao33365e72016-11-03 15:03:37 +080010115static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
Yunhong Jiang64672c92016-06-13 14:19:59 -070010116{
10117 struct vcpu_vmx *vmx = to_vmx(vcpu);
10118 u64 tscl;
10119 u32 delta_tsc;
10120
10121 if (vmx->hv_deadline_tsc == -1)
10122 return;
10123
10124 tscl = rdtsc();
10125 if (vmx->hv_deadline_tsc > tscl)
10126		/* guaranteed to fit in 32 bits, since this was checked in set_hv_timer */
10127 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
10128 cpu_preemption_timer_multi);
10129 else
10130 delta_tsc = 0;
10131
10132 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
10133}
10134
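/*
 * Illustrative sketch (not from the original file): the conversion done by
 * vmx_arm_hv_timer() above. The VMX preemption timer counts down in units
 * of TSC cycles divided by 2^N, where N comes from MSR_IA32_VMX_MISC
 * (cpu_preemption_timer_multi). A past-or-now deadline is written as 0 so
 * the timer fires immediately:
 */
static u32 demo_tsc_deadline_to_timer_ticks(u64 deadline_tsc, u64 now_tsc,
					    int shift)
{
	if (deadline_tsc <= now_tsc)
		return 0;
	return (u32)((deadline_tsc - now_tsc) >> shift);
}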
Lai Jiangshana3b5ba42011-02-11 14:29:40 +080010135static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -080010136{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -040010137 struct vcpu_vmx *vmx = to_vmx(vcpu);
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010138 unsigned long cr3, cr4, evmcs_rsp;
Avi Kivity104f2262010-11-18 13:12:52 +020010139
Paolo Bonzini8a1b4392017-11-06 13:31:12 +010010140 /* Record the guest's net vcpu time for enforced NMI injections. */
Paolo Bonzinid02fcf52017-11-06 13:31:13 +010010141 if (unlikely(!enable_vnmi &&
Paolo Bonzini8a1b4392017-11-06 13:31:12 +010010142 vmx->loaded_vmcs->soft_vnmi_blocked))
10143 vmx->loaded_vmcs->entry_time = ktime_get();
10144
Avi Kivity104f2262010-11-18 13:12:52 +020010145	/* Don't enter VMX if guest state is invalid; let the exit handler
10146	   start emulating until we arrive back at a valid state. */
Gleb Natapov14168782013-01-21 15:36:49 +020010147 if (vmx->emulation_required)
Avi Kivity104f2262010-11-18 13:12:52 +020010148 return;
10149
Radim Krčmářa7653ec2014-08-21 18:08:07 +020010150 if (vmx->ple_window_dirty) {
10151 vmx->ple_window_dirty = false;
10152 vmcs_write32(PLE_WINDOW, vmx->ple_window);
10153 }
10154
Abel Gordon012f83c2013-04-18 14:39:25 +030010155 if (vmx->nested.sync_shadow_vmcs) {
10156 copy_vmcs12_to_shadow(vmx);
10157 vmx->nested.sync_shadow_vmcs = false;
10158 }
10159
Avi Kivity104f2262010-11-18 13:12:52 +020010160 if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
10161 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
10162 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
10163 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
10164
Andy Lutomirskid6e41f12017-05-28 10:00:17 -070010165 cr3 = __get_current_cr3_fast();
Ladi Prosek44889942017-09-22 07:53:15 +020010166 if (unlikely(cr3 != vmx->loaded_vmcs->vmcs_host_cr3)) {
Andy Lutomirskid6e41f12017-05-28 10:00:17 -070010167 vmcs_writel(HOST_CR3, cr3);
Ladi Prosek44889942017-09-22 07:53:15 +020010168 vmx->loaded_vmcs->vmcs_host_cr3 = cr3;
Andy Lutomirskid6e41f12017-05-28 10:00:17 -070010169 }
10170
Andy Lutomirski1e02ce42014-10-24 15:58:08 -070010171 cr4 = cr4_read_shadow();
Ladi Prosek44889942017-09-22 07:53:15 +020010172 if (unlikely(cr4 != vmx->loaded_vmcs->vmcs_host_cr4)) {
Andy Lutomirskid974baa2014-10-08 09:02:13 -070010173 vmcs_writel(HOST_CR4, cr4);
Ladi Prosek44889942017-09-22 07:53:15 +020010174 vmx->loaded_vmcs->vmcs_host_cr4 = cr4;
Andy Lutomirskid974baa2014-10-08 09:02:13 -070010175 }
10176
Avi Kivity104f2262010-11-18 13:12:52 +020010177 /* When single-stepping over STI and MOV SS, we must clear the
10178 * corresponding interruptibility bits in the guest state. Otherwise
10179 * vmentry fails as it then expects bit 14 (BS) in pending debug
10180	 * exceptions to be set, but that's not correct for the guest debugging
10181 * case. */
10182 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
10183 vmx_set_interrupt_shadow(vcpu, 0);
10184
Paolo Bonzinib9dd21e2017-08-23 23:14:38 +020010185 if (static_cpu_has(X86_FEATURE_PKU) &&
10186 kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
10187 vcpu->arch.pkru != vmx->host_pkru)
10188 __write_pkru(vcpu->arch.pkru);
Xiao Guangrong1be0e612016-03-22 16:51:18 +080010189
Gleb Natapovd7cd9792011-10-05 14:01:23 +020010190 atomic_switch_perf_msrs(vmx);
10191
Yunhong Jiang64672c92016-06-13 14:19:59 -070010192 vmx_arm_hv_timer(vcpu);
10193
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010010194 /*
10195 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
10196 * it's non-zero. Since vmentry is serialising on affected CPUs, there
10197 * is no need to worry about the conditional branch over the wrmsr
10198 * being speculatively taken.
10199 */
Thomas Gleixnerccbcd262018-05-09 23:01:01 +020010200 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010010201
Nadav Har'Eld462b812011-05-24 15:26:10 +030010202 vmx->__launched = vmx->loaded_vmcs->launched;
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010203
10204 evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
10205 (unsigned long)&current_evmcs->host_rsp : 0;
10206
Paolo Bonzinic595cee2018-07-02 13:07:14 +020010207 if (static_branch_unlikely(&vmx_l1d_should_flush)) {
10208 if (vcpu->arch.l1tf_flush_l1d)
10209 vmx_l1d_flush(vcpu);
10210 }
10211
Avi Kivity104f2262010-11-18 13:12:52 +020010212 asm(
Avi Kivity6aa8b732006-12-10 02:21:36 -080010213 /* Store host registers */
Avi Kivityb188c81f2012-09-16 15:10:58 +030010214 "push %%" _ASM_DX "; push %%" _ASM_BP ";"
10215 "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
10216 "push %%" _ASM_CX " \n\t"
10217 "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
Avi Kivity313dbd492008-07-17 18:04:30 +030010218 "je 1f \n\t"
Avi Kivityb188c81f2012-09-16 15:10:58 +030010219 "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010220 /* Avoid VMWRITE when Enlightened VMCS is in use */
10221 "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
10222 "jz 2f \n\t"
10223 "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
10224 "jmp 1f \n\t"
10225 "2: \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +030010226 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
Avi Kivity313dbd492008-07-17 18:04:30 +030010227 "1: \n\t"
Avi Kivityd3edefc2009-06-16 12:33:56 +030010228 /* Reload cr2 if changed */
Avi Kivityb188c81f2012-09-16 15:10:58 +030010229 "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
10230 "mov %%cr2, %%" _ASM_DX " \n\t"
10231 "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010232 "je 3f \n\t"
Avi Kivityb188c81f2012-09-16 15:10:58 +030010233 "mov %%" _ASM_AX", %%cr2 \n\t"
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010234 "3: \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -080010235		/* Check if vmlaunch or vmresume is needed */
Avi Kivitye08aa782007-11-15 18:06:18 +020010236 "cmpl $0, %c[launched](%0) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -080010237 /* Load guest registers. Don't clobber flags. */
Avi Kivityb188c81f2012-09-16 15:10:58 +030010238 "mov %c[rax](%0), %%" _ASM_AX " \n\t"
10239 "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
10240 "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
10241 "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
10242 "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
10243 "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -080010244#ifdef CONFIG_X86_64
Avi Kivitye08aa782007-11-15 18:06:18 +020010245 "mov %c[r8](%0), %%r8 \n\t"
10246 "mov %c[r9](%0), %%r9 \n\t"
10247 "mov %c[r10](%0), %%r10 \n\t"
10248 "mov %c[r11](%0), %%r11 \n\t"
10249 "mov %c[r12](%0), %%r12 \n\t"
10250 "mov %c[r13](%0), %%r13 \n\t"
10251 "mov %c[r14](%0), %%r14 \n\t"
10252 "mov %c[r15](%0), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -080010253#endif
Avi Kivityb188c81f2012-09-16 15:10:58 +030010254 "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
Avi Kivityc8019492008-07-14 14:44:59 +030010255
Avi Kivity6aa8b732006-12-10 02:21:36 -080010256 /* Enter guest mode */
Avi Kivity83287ea422012-09-16 15:10:57 +030010257 "jne 1f \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +030010258 __ex(ASM_VMX_VMLAUNCH) "\n\t"
Avi Kivity83287ea422012-09-16 15:10:57 +030010259 "jmp 2f \n\t"
10260 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
10261 "2: "
Avi Kivity6aa8b732006-12-10 02:21:36 -080010262 /* Save guest registers, load host registers, keep flags */
Avi Kivityb188c81f2012-09-16 15:10:58 +030010263 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
Avi Kivity40712fa2011-01-06 18:09:12 +020010264 "pop %0 \n\t"
Jim Mattson0cb5b302018-01-03 14:31:38 -080010265 "setbe %c[fail](%0)\n\t"
Avi Kivityb188c81f2012-09-16 15:10:58 +030010266 "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
10267 "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
10268 __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
10269 "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
10270 "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
10271 "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
10272 "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -080010273#ifdef CONFIG_X86_64
Avi Kivitye08aa782007-11-15 18:06:18 +020010274 "mov %%r8, %c[r8](%0) \n\t"
10275 "mov %%r9, %c[r9](%0) \n\t"
10276 "mov %%r10, %c[r10](%0) \n\t"
10277 "mov %%r11, %c[r11](%0) \n\t"
10278 "mov %%r12, %c[r12](%0) \n\t"
10279 "mov %%r13, %c[r13](%0) \n\t"
10280 "mov %%r14, %c[r14](%0) \n\t"
10281 "mov %%r15, %c[r15](%0) \n\t"
Jim Mattson0cb5b302018-01-03 14:31:38 -080010282 "xor %%r8d, %%r8d \n\t"
10283 "xor %%r9d, %%r9d \n\t"
10284 "xor %%r10d, %%r10d \n\t"
10285 "xor %%r11d, %%r11d \n\t"
10286 "xor %%r12d, %%r12d \n\t"
10287 "xor %%r13d, %%r13d \n\t"
10288 "xor %%r14d, %%r14d \n\t"
10289 "xor %%r15d, %%r15d \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -080010290#endif
Avi Kivityb188c81f2012-09-16 15:10:58 +030010291 "mov %%cr2, %%" _ASM_AX " \n\t"
10292 "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
Avi Kivityc8019492008-07-14 14:44:59 +030010293
Jim Mattson0cb5b302018-01-03 14:31:38 -080010294 "xor %%eax, %%eax \n\t"
10295 "xor %%ebx, %%ebx \n\t"
10296 "xor %%esi, %%esi \n\t"
10297 "xor %%edi, %%edi \n\t"
Avi Kivityb188c81f2012-09-16 15:10:58 +030010298 "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
Avi Kivity83287ea422012-09-16 15:10:57 +030010299 ".pushsection .rodata \n\t"
10300 ".global vmx_return \n\t"
10301 "vmx_return: " _ASM_PTR " 2b \n\t"
10302 ".popsection"
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010303 : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
Nadav Har'Eld462b812011-05-24 15:26:10 +030010304 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
Avi Kivitye08aa782007-11-15 18:06:18 +020010305 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
Avi Kivity313dbd492008-07-17 18:04:30 +030010306 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +080010307 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
10308 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
10309 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
10310 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
10311 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
10312 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
10313 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
Avi Kivity05b3e0c2006-12-13 00:33:45 -080010314#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +080010315 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
10316 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
10317 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
10318 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
10319 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
10320 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
10321 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
10322 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
Avi Kivity6aa8b732006-12-10 02:21:36 -080010323#endif
Avi Kivity40712fa2011-01-06 18:09:12 +020010324 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
10325 [wordsize]"i"(sizeof(ulong))
Laurent Vivierc2036302007-10-25 14:18:52 +020010326 : "cc", "memory"
10327#ifdef CONFIG_X86_64
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010328 , "rax", "rbx", "rdi"
Laurent Vivierc2036302007-10-25 14:18:52 +020010329 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
Avi Kivityb188c81f2012-09-16 15:10:58 +030010330#else
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010331 , "eax", "ebx", "edi"
Laurent Vivierc2036302007-10-25 14:18:52 +020010332#endif
10333 );
Avi Kivity6aa8b732006-12-10 02:21:36 -080010334
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010010335 /*
10336 * We do not use IBRS in the kernel. If this vCPU has used the
10337 * SPEC_CTRL MSR it may have left it on; save the value and
10338 * turn it off. This is much more efficient than blindly adding
10339 * it to the atomic save/restore list. Especially as the former
10340 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
10341 *
10342 * For non-nested case:
10343 * If the L01 MSR bitmap does not intercept the MSR, then we need to
10344 * save it.
10345 *
10346 * For nested case:
10347 * If the L02 MSR bitmap does not intercept the MSR, then we need to
10348 * save it.
10349 */
Paolo Bonzini946fbbc2018-02-22 16:43:18 +010010350 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
Paolo Bonziniecb586b2018-02-22 16:43:17 +010010351 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010010352
Thomas Gleixnerccbcd262018-05-09 23:01:01 +020010353 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010010354
David Woodhouse117cc7a2018-01-12 11:11:27 +000010355 /* Eliminate branch target predictions from guest mode */
10356 vmexit_fill_RSB();
10357
Vitaly Kuznetsov773e8a02018-03-20 15:02:11 +010010358 /* All fields are clean at this point */
10359 if (static_branch_unlikely(&enable_evmcs))
10360 current_evmcs->hv_clean_fields |=
10361 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
10362
Gleb Natapov2a7921b2012-08-12 16:12:29 +030010363 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
Wanpeng Li74c55932017-11-29 01:31:20 -080010364 if (vmx->host_debugctlmsr)
10365 update_debugctlmsr(vmx->host_debugctlmsr);
Gleb Natapov2a7921b2012-08-12 16:12:29 +030010366
Avi Kivityaa67f602012-08-01 16:48:03 +030010367#ifndef CONFIG_X86_64
10368 /*
10369 * The sysexit path does not restore ds/es, so we must set them to
10370 * a reasonable value ourselves.
10371 *
10372 * We can't defer this to vmx_load_host_state() since that function
10373	 * may be executed in interrupt context, which saves and restores
10374	 * segments around it, nullifying its effect.
10375 */
10376 loadsegment(ds, __USER_DS);
10377 loadsegment(es, __USER_DS);
10378#endif
10379
Avi Kivity6de4f3a2009-05-31 22:58:47 +030010380 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
Avi Kivity6de12732011-03-07 12:51:22 +020010381 | (1 << VCPU_EXREG_RFLAGS)
Avi Kivityaff48ba2010-12-05 18:56:11 +020010382 | (1 << VCPU_EXREG_PDPTR)
Avi Kivity2fb92db2011-04-27 19:42:18 +030010383 | (1 << VCPU_EXREG_SEGMENTS)
Avi Kivityaff48ba2010-12-05 18:56:11 +020010384 | (1 << VCPU_EXREG_CR3));
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -030010385 vcpu->arch.regs_dirty = 0;
10386
Gleb Natapove0b890d2013-09-25 12:51:33 +030010387 /*
Xiao Guangrong1be0e612016-03-22 16:51:18 +080010388	 * Eager FPU is enabled when PKEY is supported, and CR4 has been
10389	 * switched back on the host, so it is safe to read the guest PKRU
10390	 * from the current XSAVE area.
10391 */
Paolo Bonzinib9dd21e2017-08-23 23:14:38 +020010392 if (static_cpu_has(X86_FEATURE_PKU) &&
10393 kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
10394 vcpu->arch.pkru = __read_pkru();
10395 if (vcpu->arch.pkru != vmx->host_pkru)
Xiao Guangrong1be0e612016-03-22 16:51:18 +080010396 __write_pkru(vmx->host_pkru);
Xiao Guangrong1be0e612016-03-22 16:51:18 +080010397 }
10398
Gleb Natapove0b890d2013-09-25 12:51:33 +030010399 vmx->nested.nested_run_pending = 0;
Jim Mattsonb060ca32017-09-14 16:31:42 -070010400 vmx->idt_vectoring_info = 0;
10401
10402 vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
10403 if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
10404 return;
10405
10406 vmx->loaded_vmcs->launched = 1;
10407 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
Gleb Natapove0b890d2013-09-25 12:51:33 +030010408
Avi Kivity51aa01d2010-07-20 14:31:20 +030010409 vmx_complete_atomic_exit(vmx);
10410 vmx_recover_nmi_blocking(vmx);
Avi Kivitycf393f72008-07-01 16:20:21 +030010411 vmx_complete_interrupts(vmx);
Avi Kivity6aa8b732006-12-10 02:21:36 -080010412}
Josh Poimboeufc207aee2017-06-28 10:11:06 -050010413STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -080010414
Sean Christopherson434a1e92018-03-20 12:17:18 -070010415static struct kvm *vmx_vm_alloc(void)
10416{
Marc Orrd1e5b0e2018-05-15 04:37:37 -070010417 struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
Sean Christopherson40bbb9d2018-03-20 12:17:20 -070010418 return &kvm_vmx->kvm;
Sean Christopherson434a1e92018-03-20 12:17:18 -070010419}
10420
10421static void vmx_vm_free(struct kvm *kvm)
10422{
Marc Orrd1e5b0e2018-05-15 04:37:37 -070010423 vfree(to_kvm_vmx(kvm));
Sean Christopherson434a1e92018-03-20 12:17:18 -070010424}
10425
David Hildenbrand1279a6b12017-03-20 10:00:08 +010010426static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
Paolo Bonzini4fa77342014-07-17 12:25:16 +020010427{
10428 struct vcpu_vmx *vmx = to_vmx(vcpu);
10429 int cpu;
10430
David Hildenbrand1279a6b12017-03-20 10:00:08 +010010431 if (vmx->loaded_vmcs == vmcs)
Paolo Bonzini4fa77342014-07-17 12:25:16 +020010432 return;
10433
10434 cpu = get_cpu();
David Hildenbrand1279a6b12017-03-20 10:00:08 +010010435 vmx->loaded_vmcs = vmcs;
Paolo Bonzini4fa77342014-07-17 12:25:16 +020010436 vmx_vcpu_put(vcpu);
10437 vmx_vcpu_load(vcpu, cpu);
Paolo Bonzini4fa77342014-07-17 12:25:16 +020010438 put_cpu();
10439}
10440
Jim Mattson2f1fe812016-07-08 15:36:06 -070010441/*
10442 * Ensure that the current vmcs of the logical processor is the
10443 * vmcs01 of the vcpu before calling free_nested().
10444 */
10445static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
10446{
10447 struct vcpu_vmx *vmx = to_vmx(vcpu);
Jim Mattson2f1fe812016-07-08 15:36:06 -070010448
Christoffer Dallec7660c2017-12-04 21:35:23 +010010449 vcpu_load(vcpu);
David Hildenbrand1279a6b12017-03-20 10:00:08 +010010450 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
Jim Mattson2f1fe812016-07-08 15:36:06 -070010451 free_nested(vmx);
10452 vcpu_put(vcpu);
10453}
10454
Avi Kivity6aa8b732006-12-10 02:21:36 -080010455static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
10456{
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010457 struct vcpu_vmx *vmx = to_vmx(vcpu);
10458
Kai Huang843e4332015-01-28 10:54:28 +080010459 if (enable_pml)
Kai Huanga3eaa862015-11-04 13:46:05 +080010460 vmx_destroy_pml_buffer(vmx);
Wanpeng Li991e7a02015-09-16 17:30:05 +080010461 free_vpid(vmx->vpid);
Paolo Bonzini4fa77342014-07-17 12:25:16 +020010462 leave_guest_mode(vcpu);
Jim Mattson2f1fe812016-07-08 15:36:06 -070010463 vmx_free_vcpu_nested(vcpu);
Paolo Bonzini4fa77342014-07-17 12:25:16 +020010464 free_loaded_vmcs(vmx->loaded_vmcs);
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010465 kfree(vmx->guest_msrs);
10466 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +100010467 kmem_cache_free(kvm_vcpu_cache, vmx);
Avi Kivity6aa8b732006-12-10 02:21:36 -080010468}
10469
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010470static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -080010471{
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010472 int err;
Rusty Russellc16f8622007-07-30 21:12:19 +100010473 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Paolo Bonzini904e14f2018-01-16 16:51:18 +010010474 unsigned long *msr_bitmap;
Avi Kivity15ad7142007-07-11 18:17:21 +030010475 int cpu;
Avi Kivity6aa8b732006-12-10 02:21:36 -080010476
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -040010477 if (!vmx)
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010478 return ERR_PTR(-ENOMEM);
10479
Wanpeng Li991e7a02015-09-16 17:30:05 +080010480 vmx->vpid = allocate_vpid();
Sheng Yang2384d2b2008-01-17 15:14:33 +080010481
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010482 err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
10483 if (err)
10484 goto free_vcpu;
Ingo Molnar965b58a2007-01-05 16:36:23 -080010485
Peter Feiner4e595162016-07-07 14:49:58 -070010486 err = -ENOMEM;
10487
10488 /*
10489	 * If PML is turned on, failure to enable PML simply results in failure
10490	 * to create the vcpu. This lets us simplify the PML logic by avoiding
10491	 * corner cases such as PML being enabled on only some of the guest's
10492	 * vcpus.
10493 */
10494 if (enable_pml) {
10495 vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
10496 if (!vmx->pml_pg)
10497 goto uninit_vcpu;
10498 }
10499
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -040010500 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
Paolo Bonzini03916db2014-07-24 14:21:57 +020010501 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
10502 > PAGE_SIZE);
Nadav Amit0123be42014-07-24 15:06:56 +030010503
Peter Feiner4e595162016-07-07 14:49:58 -070010504 if (!vmx->guest_msrs)
10505 goto free_pml;
Ingo Molnar965b58a2007-01-05 16:36:23 -080010506
Paolo Bonzinif21f1652018-01-11 12:16:15 +010010507 err = alloc_loaded_vmcs(&vmx->vmcs01);
10508 if (err < 0)
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010509 goto free_msrs;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -040010510
Paolo Bonzini904e14f2018-01-16 16:51:18 +010010511 msr_bitmap = vmx->vmcs01.msr_bitmap;
10512 vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
10513 vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
10514 vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
10515 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
10516 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
10517 vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
10518 vmx->msr_bitmap_mode = 0;
10519
Paolo Bonzinif21f1652018-01-11 12:16:15 +010010520 vmx->loaded_vmcs = &vmx->vmcs01;
Avi Kivity15ad7142007-07-11 18:17:21 +030010521 cpu = get_cpu();
10522 vmx_vcpu_load(&vmx->vcpu, cpu);
Zachary Amsdene48672f2010-08-19 22:07:23 -100010523 vmx->vcpu.cpu = cpu;
David Hildenbrand12d79912017-08-24 20:51:26 +020010524 vmx_vcpu_setup(vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010525 vmx_vcpu_put(&vmx->vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +030010526 put_cpu();
Paolo Bonzini35754c92015-07-29 12:05:37 +020010527 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
Jan Kiszkabe6d05c2011-04-13 01:27:55 +020010528 err = alloc_apic_access_page(kvm);
10529 if (err)
Marcelo Tosatti5e4a0b32008-02-14 21:21:43 -020010530 goto free_vmcs;
Jan Kiszkaa63cb562013-04-08 11:07:46 +020010531 }
Ingo Molnar965b58a2007-01-05 16:36:23 -080010532
Sean Christophersone90008d2018-03-05 12:04:37 -080010533 if (enable_ept && !enable_unrestricted_guest) {
Tang Chenf51770e2014-09-16 18:41:59 +080010534 err = init_rmode_identity_map(kvm);
10535 if (err)
Gleb Natapov93ea5382011-02-21 12:07:59 +020010536 goto free_vmcs;
Sheng Yangb927a3c2009-07-21 10:42:48 +080010537 }
Sheng Yangb7ebfb02008-04-25 21:44:52 +080010538
Wanpeng Li5c614b32015-10-13 09:18:36 -070010539 if (nested) {
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010010540 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
10541 kvm_vcpu_apicv_active(&vmx->vcpu));
Wanpeng Li5c614b32015-10-13 09:18:36 -070010542 vmx->nested.vpid02 = allocate_vpid();
10543 }
Wincy Vanb9c237b2015-02-03 23:56:30 +080010544
Wincy Van705699a2015-02-03 23:58:17 +080010545 vmx->nested.posted_intr_nv = -1;
Nadav Har'Ela9d30f32011-05-25 23:03:55 +030010546 vmx->nested.current_vmptr = -1ull;
Nadav Har'Ela9d30f32011-05-25 23:03:55 +030010547
Haozhong Zhang37e4c992016-06-22 14:59:55 +080010548 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
10549
Paolo Bonzini31afb2e2017-06-06 12:57:06 +020010550 /*
10551 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
10552 * or POSTED_INTR_WAKEUP_VECTOR.
10553 */
10554 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
10555 vmx->pi_desc.sn = 1;
10556
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010557 return &vmx->vcpu;
Ingo Molnar965b58a2007-01-05 16:36:23 -080010558
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010559free_vmcs:
Wanpeng Li5c614b32015-10-13 09:18:36 -070010560 free_vpid(vmx->nested.vpid02);
Xiao Guangrong5f3fbc32012-05-14 14:58:58 +080010561 free_loaded_vmcs(vmx->loaded_vmcs);
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010562free_msrs:
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010563 kfree(vmx->guest_msrs);
Peter Feiner4e595162016-07-07 14:49:58 -070010564free_pml:
10565 vmx_destroy_pml_buffer(vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010566uninit_vcpu:
10567 kvm_vcpu_uninit(&vmx->vcpu);
10568free_vcpu:
Wanpeng Li991e7a02015-09-16 17:30:05 +080010569 free_vpid(vmx->vpid);
Rusty Russella4770342007-08-01 14:46:11 +100010570 kmem_cache_free(kvm_vcpu_cache, vmx);
Rusty Russellfb3f0f52007-07-27 17:16:56 +100010571 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -080010572}
10573
Konrad Rzeszutek Wilk26acfb62018-06-20 11:29:53 -040010574#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
10575
Wanpeng Lib31c1142018-03-12 04:53:04 -070010576static int vmx_vm_init(struct kvm *kvm)
10577{
10578 if (!ple_gap)
10579 kvm->arch.pause_in_guest = true;
Konrad Rzeszutek Wilk26acfb62018-06-20 11:29:53 -040010580
10581 if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
10582 if (nosmt) {
10583 pr_err(L1TF_MSG);
10584 return -EOPNOTSUPP;
10585 }
10586 pr_warn(L1TF_MSG);
10587 }
Wanpeng Lib31c1142018-03-12 04:53:04 -070010588 return 0;
10589}
10590
Yang, Sheng002c7f72007-07-31 14:23:01 +030010591static void __init vmx_check_processor_compat(void *rtn)
10592{
10593 struct vmcs_config vmcs_conf;
10594
10595 *(int *)rtn = 0;
10596 if (setup_vmcs_config(&vmcs_conf) < 0)
10597 *(int *)rtn = -EIO;
Paolo Bonzini13893092018-02-26 13:40:09 +010010598 nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, enable_apicv);
Yang, Sheng002c7f72007-07-31 14:23:01 +030010599 if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
10600 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
10601 smp_processor_id());
10602 *(int *)rtn = -EIO;
10603 }
10604}
10605
Sheng Yang4b12f0d2009-04-27 20:35:42 +080010606static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
Sheng Yang64d4d522008-10-09 16:01:57 +080010607{
Xiao Guangrongb18d5432015-06-15 16:55:21 +080010608 u8 cache;
10609 u64 ipat = 0;
Sheng Yang4b12f0d2009-04-27 20:35:42 +080010610
Sheng Yang522c68c2009-04-27 20:35:43 +080010611	/* For the VT-d and EPT combination:
Paolo Bonzini606decd2015-10-01 13:12:47 +020010612	 * 1. MMIO: always map as UC.
Sheng Yang522c68c2009-04-27 20:35:43 +080010613	 * 2. EPT with VT-d:
10614	 *   a. VT-d without the snooping control feature: the result can't
Paolo Bonzini606decd2015-10-01 13:12:47 +020010615	 *	be guaranteed; try to trust the guest.
Sheng Yang522c68c2009-04-27 20:35:43 +080010616	 *   b. VT-d with the snooping control feature: snooping control in the
10617	 *	VT-d engine guarantees cache correctness. Just set it
10618	 *	to WB to stay consistent with the host, the same as item 3.
Sheng Yanga19a6d12010-02-09 16:41:53 +080010619	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
Sheng Yang522c68c2009-04-27 20:35:43 +080010620	 *    consistent with the host MTRR.
10621 */
Paolo Bonzini606decd2015-10-01 13:12:47 +020010622 if (is_mmio) {
10623 cache = MTRR_TYPE_UNCACHABLE;
10624 goto exit;
10625 }
10626
10627 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
Xiao Guangrongb18d5432015-06-15 16:55:21 +080010628 ipat = VMX_EPT_IPAT_BIT;
10629 cache = MTRR_TYPE_WRBACK;
10630 goto exit;
10631 }
10632
10633 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
10634 ipat = VMX_EPT_IPAT_BIT;
Paolo Bonzini0da029e2015-07-23 08:24:42 +020010635 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
Xiao Guangrongfb2799502015-07-16 03:25:56 +080010636 cache = MTRR_TYPE_WRBACK;
10637 else
10638 cache = MTRR_TYPE_UNCACHABLE;
Xiao Guangrongb18d5432015-06-15 16:55:21 +080010639 goto exit;
10640 }
10641
Xiao Guangrongff536042015-06-15 16:55:22 +080010642 cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
Xiao Guangrongb18d5432015-06-15 16:55:21 +080010643
10644exit:
10645 return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
Sheng Yang64d4d522008-10-09 16:01:57 +080010646}
10647
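/*
 * Illustrative sketch (not from the original file): the value built by
 * vmx_get_mt_mask() above lands in an EPT leaf entry, where the memory
 * type occupies bits 5:3 (VMX_EPT_MT_EPTE_SHIFT == 3) and bit 6 is IPAT,
 * "ignore PAT", which makes the EPT memory type authoritative over the
 * guest's PAT:
 */
static u64 demo_ept_memtype_bits(u8 mem_type, bool ignore_guest_pat)
{
	u64 val = (u64)mem_type << VMX_EPT_MT_EPTE_SHIFT;

	if (ignore_guest_pat)
		val |= VMX_EPT_IPAT_BIT;
	return val;
}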
Sheng Yang17cc3932010-01-05 19:02:27 +080010648static int vmx_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +020010649{
Sheng Yang878403b2010-01-05 19:02:29 +080010650 if (enable_ept && !cpu_has_vmx_ept_1g_page())
10651 return PT_DIRECTORY_LEVEL;
10652 else
10653 /* For shadow and EPT supported 1GB page */
10654		/* For shadow paging, and for EPT with 1 GB page support */
Joerg Roedel344f4142009-07-27 16:30:48 +020010655}
10656
Xiao Guangrongfeda8052015-09-09 14:05:55 +080010657static void vmcs_set_secondary_exec_control(u32 new_ctl)
10658{
10659 /*
10660 * These bits in the secondary execution controls field
10661 * are dynamic, the others are mostly based on the hypervisor
10662	 * are dynamic; the others are mostly based on the hypervisor
10663 * dynamic bits.
10664 */
10665 u32 mask =
10666 SECONDARY_EXEC_SHADOW_VMCS |
10667 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
Paolo Bonzini0367f202016-07-12 10:44:55 +020010668 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
10669 SECONDARY_EXEC_DESC;
Xiao Guangrongfeda8052015-09-09 14:05:55 +080010670
10671 u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
10672
10673 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
10674 (new_ctl & ~mask) | (cur_ctl & mask));
10675}
10676
David Matlack8322ebb2016-11-29 18:14:09 -080010677/*
10678 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
10679 * (indicating "allowed-1") if they are supported in the guest's CPUID.
10680 */
10681static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
10682{
10683 struct vcpu_vmx *vmx = to_vmx(vcpu);
10684 struct kvm_cpuid_entry2 *entry;
10685
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010010686 vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
10687 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
David Matlack8322ebb2016-11-29 18:14:09 -080010688
10689#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
10690 if (entry && (entry->_reg & (_cpuid_mask))) \
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010010691 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
David Matlack8322ebb2016-11-29 18:14:09 -080010692} while (0)
10693
10694 entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
10695 cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME));
10696 cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME));
10697 cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC));
10698 cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE));
10699 cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE));
10700 cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE));
10701 cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE));
10702 cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE));
10703 cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR));
10704 cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
10705 cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX));
10706 cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX));
10707 cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID));
10708 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE));
10709
10710 entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
10711 cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE));
10712 cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
10713 cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
10714 cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
Paolo Bonzinic4ad77e2017-11-13 14:23:59 +010010715 cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP));
David Matlack8322ebb2016-11-29 18:14:09 -080010716
10717#undef cr4_fixed1_update
10718}
10719
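/*
 * Illustrative sketch (not from the original file): how the fixed-bit MSRs
 * populated above constrain a nested guest's CR4. A bit set in CR4_FIXED0
 * must be 1 in CR4; a bit clear in CR4_FIXED1 must be 0. The update above
 * therefore mirrors the guest's CPUID features into the "allowed-1" mask:
 */
static bool demo_cr4_valid_for_vmx(u64 cr4, u64 fixed0, u64 fixed1)
{
	return (cr4 & fixed0) == fixed0 && !(cr4 & ~fixed1);
}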
Sheng Yang0e851882009-12-18 16:48:46 +080010720static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
10721{
Sheng Yang4e47c7a2009-12-18 16:48:47 +080010722 struct vcpu_vmx *vmx = to_vmx(vcpu);
Sheng Yang4e47c7a2009-12-18 16:48:47 +080010723
Paolo Bonzini80154d72017-08-24 13:55:35 +020010724 if (cpu_has_secondary_exec_ctrls()) {
10725 vmx_compute_secondary_exec_control(vmx);
10726 vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
Sheng Yang4e47c7a2009-12-18 16:48:47 +080010727 }
Mao, Junjiead756a12012-07-02 01:18:48 +000010728
Haozhong Zhang37e4c992016-06-22 14:59:55 +080010729 if (nested_vmx_allowed(vcpu))
10730 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
10731 FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
10732 else
10733 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
10734 ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
David Matlack8322ebb2016-11-29 18:14:09 -080010735
10736 if (nested_vmx_allowed(vcpu))
10737 nested_vmx_cr_fixed1_bits_update(vcpu);
Sheng Yang0e851882009-12-18 16:48:46 +080010738}
10739
Joerg Roedeld4330ef2010-04-22 12:33:11 +020010740static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
10741{
Nadav Har'El7b8050f2011-05-25 23:16:10 +030010742 if (func == 1 && nested)
10743 entry->ecx |= bit(X86_FEATURE_VMX);
Joerg Roedeld4330ef2010-04-22 12:33:11 +020010744}
10745
Yang Zhang25d92082013-08-06 12:00:32 +030010746static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
10747 struct x86_exception *fault)
10748{
Jan Kiszka533558b2014-01-04 18:47:20 +010010749 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
Bandan Dasc5f983f2017-05-05 15:25:14 -040010750 struct vcpu_vmx *vmx = to_vmx(vcpu);
Jan Kiszka533558b2014-01-04 18:47:20 +010010751 u32 exit_reason;
Bandan Dasc5f983f2017-05-05 15:25:14 -040010752 unsigned long exit_qualification = vcpu->arch.exit_qualification;
Yang Zhang25d92082013-08-06 12:00:32 +030010753
Bandan Dasc5f983f2017-05-05 15:25:14 -040010754 if (vmx->nested.pml_full) {
10755 exit_reason = EXIT_REASON_PML_FULL;
10756 vmx->nested.pml_full = false;
10757 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
10758 } else if (fault->error_code & PFERR_RSVD_MASK)
Jan Kiszka533558b2014-01-04 18:47:20 +010010759 exit_reason = EXIT_REASON_EPT_MISCONFIG;
Yang Zhang25d92082013-08-06 12:00:32 +030010760 else
Jan Kiszka533558b2014-01-04 18:47:20 +010010761 exit_reason = EXIT_REASON_EPT_VIOLATION;
Bandan Dasc5f983f2017-05-05 15:25:14 -040010762
10763 nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
Yang Zhang25d92082013-08-06 12:00:32 +030010764 vmcs12->guest_physical_address = fault->address;
10765}
10766
Peter Feiner995f00a2017-06-30 17:26:32 -070010767static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
10768{
David Hildenbrandbb97a012017-08-10 23:15:28 +020010769 return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
Peter Feiner995f00a2017-06-30 17:26:32 -070010770}
10771
Nadav Har'El155a97a2013-08-05 11:07:16 +030010772/* Callbacks for nested_ept_init_mmu_context: */
10773
10774static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
10775{
10776 /* return the page table to be shadowed - in our case, EPT12 */
10777 return get_vmcs12(vcpu)->ept_pointer;
10778}
10779
Paolo Bonziniae1e2d12017-03-30 11:55:30 +020010780static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
Nadav Har'El155a97a2013-08-05 11:07:16 +030010781{
Paolo Bonziniad896af2013-10-02 16:56:14 +020010782 WARN_ON(mmu_is_nested(vcpu));
David Hildenbranda057e0e2017-08-10 23:36:54 +020010783 if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
Paolo Bonziniae1e2d12017-03-30 11:55:30 +020010784 return 1;
10785
10786 kvm_mmu_unload(vcpu);
Paolo Bonziniad896af2013-10-02 16:56:14 +020010787 kvm_init_shadow_ept_mmu(vcpu,
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010010788 to_vmx(vcpu)->nested.msrs.ept_caps &
Paolo Bonziniae1e2d12017-03-30 11:55:30 +020010789 VMX_EPT_EXECUTE_ONLY_BIT,
David Hildenbranda057e0e2017-08-10 23:36:54 +020010790 nested_ept_ad_enabled(vcpu));
Nadav Har'El155a97a2013-08-05 11:07:16 +030010791 vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
10792 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
10793 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
10794
10795 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
Paolo Bonziniae1e2d12017-03-30 11:55:30 +020010796 return 0;
Nadav Har'El155a97a2013-08-05 11:07:16 +030010797}
10798
10799static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
10800{
10801 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
10802}
10803
Eugene Korenevsky19d5f102014-12-16 22:35:53 +030010804static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
10805 u16 error_code)
10806{
10807 bool inequality, bit;
10808
10809 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
10810 inequality =
10811 (error_code & vmcs12->page_fault_error_code_mask) !=
10812 vmcs12->page_fault_error_code_match;
10813 return inequality ^ bit;
10814}
10815
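/*
 * Worked example of the mask/match logic above: with PF set in the
 * exception bitmap (bit == 1) and mask == match == 0, every error code
 * satisfies (ec & 0) == 0, so inequality == false and the function
 * returns true - all L2 page faults vmexit to L1. With the same
 * mask/match but EB.PF == 0, the result flips and no page fault does.
 */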
Gleb Natapovfeaf0c7d2013-09-25 12:51:36 +030010816static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
10817 struct x86_exception *fault)
10818{
10819 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
10820
10821 WARN_ON(!is_guest_mode(vcpu));
10822
Wanpeng Li305d0ab2017-09-28 18:16:44 -070010823 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
10824 !to_vmx(vcpu)->nested.nested_run_pending) {
Paolo Bonzinib96fb432017-07-27 12:29:32 +020010825 vmcs12->vm_exit_intr_error_code = fault->error_code;
10826 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
10827 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
10828 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
10829 fault->address);
Paolo Bonzini7313c692017-07-27 10:31:25 +020010830 } else {
Gleb Natapovfeaf0c7d2013-09-25 12:51:36 +030010831 kvm_inject_page_fault(vcpu, fault);
Paolo Bonzini7313c692017-07-27 10:31:25 +020010832 }
Gleb Natapovfeaf0c7d2013-09-25 12:51:36 +030010833}
10834
Paolo Bonzinic9923842017-12-13 14:16:30 +010010835static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
10836 struct vmcs12 *vmcs12);
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010837
10838static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
Wanpeng Lia2bcba52014-08-21 19:46:49 +080010839 struct vmcs12 *vmcs12)
10840{
10841 struct vcpu_vmx *vmx = to_vmx(vcpu);
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010842 struct page *page;
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010843 u64 hpa;
Wanpeng Lia2bcba52014-08-21 19:46:49 +080010844
10845 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
Wanpeng Lia2bcba52014-08-21 19:46:49 +080010846 /*
10847 * Translate L1 physical address to host physical
10848 * address for vmcs02. Keep the page pinned, so this
10849 * physical address remains valid. We keep a reference
10850 * to it so we can release it later.
10851 */
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010852 if (vmx->nested.apic_access_page) { /* shouldn't happen */
David Hildenbrand53a70da2017-08-03 18:11:05 +020010853 kvm_release_page_dirty(vmx->nested.apic_access_page);
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010854 vmx->nested.apic_access_page = NULL;
10855 }
10856 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010857 /*
10858		 * If translation failed, it does not matter: this feature asks
10859		 * to exit when the given address is accessed, and if that address
10860		 * can never be accessed, the feature won't do
10861		 * anything anyway.
10862 */
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010863 if (!is_error_page(page)) {
10864 vmx->nested.apic_access_page = page;
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010865 hpa = page_to_phys(vmx->nested.apic_access_page);
10866 vmcs_write64(APIC_ACCESS_ADDR, hpa);
10867 } else {
10868 vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
10869 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
10870 }
Wanpeng Lia2bcba52014-08-21 19:46:49 +080010871 }
Wanpeng Lia7c0b072014-08-21 19:46:50 +080010872
10873 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010874 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
David Hildenbrand53a70da2017-08-03 18:11:05 +020010875 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010876 vmx->nested.virtual_apic_page = NULL;
10877 }
10878 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
Wanpeng Lia7c0b072014-08-21 19:46:50 +080010879
10880 /*
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010881 * If translation failed, VM entry will fail because
10882 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
10883 * Failing the vm entry is _not_ what the processor
10884 * does but it's basically the only possibility we
10885 * have. We could still enter the guest if CR8 load
10886 * exits are enabled, CR8 store exits are enabled, and
10887 * virtualize APIC access is disabled; in this case
10888 * the processor would never use the TPR shadow and we
10889 * could simply clear the bit from the execution
10890 * control. But such a configuration is useless, so
10891 * let's keep the code simple.
Wanpeng Lia7c0b072014-08-21 19:46:50 +080010892 */
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010893 if (!is_error_page(page)) {
10894 vmx->nested.virtual_apic_page = page;
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010895 hpa = page_to_phys(vmx->nested.virtual_apic_page);
10896 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
10897 }
Wanpeng Lia7c0b072014-08-21 19:46:50 +080010898 }
10899
Wincy Van705699a2015-02-03 23:58:17 +080010900 if (nested_cpu_has_posted_intr(vmcs12)) {
Wincy Van705699a2015-02-03 23:58:17 +080010901 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
10902 kunmap(vmx->nested.pi_desc_page);
David Hildenbrand53a70da2017-08-03 18:11:05 +020010903 kvm_release_page_dirty(vmx->nested.pi_desc_page);
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010904 vmx->nested.pi_desc_page = NULL;
Wincy Van705699a2015-02-03 23:58:17 +080010905 }
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010906 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
10907 if (is_error_page(page))
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010908 return;
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020010909 vmx->nested.pi_desc_page = page;
10910 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
Wincy Van705699a2015-02-03 23:58:17 +080010911 vmx->nested.pi_desc =
10912 (struct pi_desc *)((void *)vmx->nested.pi_desc +
10913 (unsigned long)(vmcs12->posted_intr_desc_addr &
10914 (PAGE_SIZE - 1)));
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010915 vmcs_write64(POSTED_INTR_DESC_ADDR,
10916 page_to_phys(vmx->nested.pi_desc_page) +
10917 (unsigned long)(vmcs12->posted_intr_desc_addr &
10918 (PAGE_SIZE - 1)));
Wincy Van705699a2015-02-03 23:58:17 +080010919 }
Linus Torvaldsd4667ca2018-02-14 17:02:15 -080010920 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
KarimAllah Ahmed3712caeb2018-02-10 23:39:26 +000010921 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
10922 CPU_BASED_USE_MSR_BITMAPS);
Jim Mattson6beb7bd2016-11-30 12:03:45 -080010923 else
10924 vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
10925 CPU_BASED_USE_MSR_BITMAPS);
Wanpeng Lia2bcba52014-08-21 19:46:49 +080010926}
10927
Jan Kiszkaf4124502014-03-07 20:03:13 +010010928static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
10929{
10930 u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
10931 struct vcpu_vmx *vmx = to_vmx(vcpu);
10932
10933 if (vcpu->arch.virtual_tsc_khz == 0)
10934 return;
10935
10936 /* Make sure short timeouts reliably trigger an immediate vmexit.
10937 * hrtimer_start does not guarantee this. */
10938 if (preemption_timeout <= 1) {
10939 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
10940 return;
10941 }
10942
10943 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
10944 preemption_timeout *= 1000000;
10945 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
10946 hrtimer_start(&vmx->nested.preemption_timer,
10947 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
10948}
10949
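/*
 * Worked example of the conversion in vmx_start_preemption_timer() (a
 * sketch, assuming VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE == 5, i.e. the
 * emulated timer ticks once every 2^5 TSC cycles):
 *
 *	vmx_preemption_timer_value = 1000, virtual_tsc_khz = 2000000 (2 GHz)
 *	preemption_timeout = 1000 << 5		= 32000 TSC cycles
 *	preemption_timeout *= 1000000		= 32000000000
 *	do_div(preemption_timeout, 2000000)	= 16000 ns
 *
 * which matches 32000 cycles / 2 GHz = 16 usec.
 */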
Jim Mattson56a20512017-07-06 16:33:06 -070010950static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
10951 struct vmcs12 *vmcs12)
10952{
10953 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
10954 return 0;
10955
10956 if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
10957 !page_address_valid(vcpu, vmcs12->io_bitmap_b))
10958 return -EINVAL;
10959
10960 return 0;
10961}
10962
Wincy Van3af18d92015-02-03 23:49:31 +080010963static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
10964 struct vmcs12 *vmcs12)
10965{
Wincy Van3af18d92015-02-03 23:49:31 +080010966 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
10967 return 0;
10968
Jim Mattson5fa99cb2017-07-06 16:33:07 -070010969 if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
Wincy Van3af18d92015-02-03 23:49:31 +080010970 return -EINVAL;
10971
10972 return 0;
10973}
10974
Jim Mattson712b12d2017-08-24 13:24:47 -070010975static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
10976 struct vmcs12 *vmcs12)
10977{
10978 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
10979 return 0;
10980
10981 if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
10982 return -EINVAL;
10983
10984 return 0;
10985}
10986
Wincy Van3af18d92015-02-03 23:49:31 +080010987/*
10988 * Merge L0's and L1's MSR bitmaps; return false to indicate that
10989 * we do not use the hardware MSR bitmap.
10990 */
Paolo Bonzinic9923842017-12-13 14:16:30 +010010991static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
10992 struct vmcs12 *vmcs12)
Wincy Van3af18d92015-02-03 23:49:31 +080010993{
Wincy Van82f0dd42015-02-03 23:57:18 +080010994 int msr;
Wincy Vanf2b93282015-02-03 23:56:03 +080010995 struct page *page;
Radim Krčmářd048c092016-08-08 20:16:22 +020010996 unsigned long *msr_bitmap_l1;
Paolo Bonzini904e14f2018-01-16 16:51:18 +010010997 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
Ashok Raj15d45072018-02-01 22:59:43 +010010998 /*
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010010999	 * The pred_cmd and spec_ctrl checks verify two things:
Ashok Raj15d45072018-02-01 22:59:43 +010011000	 *
11001	 * 1. L0 gave permission to L1 to actually pass through the MSR. This
11002	 * ensures that we do not accidentally generate an L02 MSR bitmap
11003	 * from the L12 MSR bitmap that is too permissive.
11004	 * 2. L1 or its L2s have actually used the MSR. This avoids
11005	 * unnecessary merging of the bitmap if the MSR is unused. This
11006	 * works properly because we only update the L01 MSR bitmap lazily.
11007	 * So even if L0 were to pass these MSRs through to L1, the L01
11008	 * bitmap is only updated to reflect this when L1 (or its L2s)
11009	 * actually writes to the MSR.
11010 */
KarimAllah Ahmed206587a2018-02-10 23:39:25 +000011011 bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
11012 bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
Wincy Vanf2b93282015-02-03 23:56:03 +080011013
Paolo Bonzinic9923842017-12-13 14:16:30 +010011014 /* Nothing to do if the MSR bitmap is not in use. */
11015 if (!cpu_has_vmx_msr_bitmap() ||
11016 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
11017 return false;
11018
Ashok Raj15d45072018-02-01 22:59:43 +010011019 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010011020 !pred_cmd && !spec_ctrl)
Wincy Vanf2b93282015-02-03 23:56:03 +080011021 return false;
11022
David Hildenbrand5e2f30b2017-08-03 18:11:04 +020011023 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
11024 if (is_error_page(page))
Wincy Vanf2b93282015-02-03 23:56:03 +080011025 return false;
Paolo Bonzinic9923842017-12-13 14:16:30 +010011026
Radim Krčmářd048c092016-08-08 20:16:22 +020011027 msr_bitmap_l1 = (unsigned long *)kmap(page);
Paolo Bonzinic9923842017-12-13 14:16:30 +010011028 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
11029 /*
11030		 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff; it
11031		 * just lets the processor take the value from the virtual-APIC page,
11032		 * so take those 256 bits directly from the L1 bitmap.
11033 */
11034 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
11035 unsigned word = msr / BITS_PER_LONG;
11036 msr_bitmap_l0[word] = msr_bitmap_l1[word];
11037 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
Wincy Van608406e2015-02-03 23:57:51 +080011038 }
Paolo Bonzinic9923842017-12-13 14:16:30 +010011039 } else {
11040 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
11041 unsigned word = msr / BITS_PER_LONG;
11042 msr_bitmap_l0[word] = ~0;
11043 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
11044 }
11045 }
11046
11047 nested_vmx_disable_intercept_for_msr(
11048 msr_bitmap_l1, msr_bitmap_l0,
Paolo Bonzinid7231e72017-12-21 00:47:55 +010011049 X2APIC_MSR(APIC_TASKPRI),
Paolo Bonzinic9923842017-12-13 14:16:30 +010011050 MSR_TYPE_W);
11051
11052 if (nested_cpu_has_vid(vmcs12)) {
11053 nested_vmx_disable_intercept_for_msr(
11054 msr_bitmap_l1, msr_bitmap_l0,
Paolo Bonzinid7231e72017-12-21 00:47:55 +010011055 X2APIC_MSR(APIC_EOI),
Paolo Bonzinic9923842017-12-13 14:16:30 +010011056 MSR_TYPE_W);
11057 nested_vmx_disable_intercept_for_msr(
11058 msr_bitmap_l1, msr_bitmap_l0,
Paolo Bonzinid7231e72017-12-21 00:47:55 +010011059 X2APIC_MSR(APIC_SELF_IPI),
Paolo Bonzinic9923842017-12-13 14:16:30 +010011060 MSR_TYPE_W);
Wincy Van82f0dd42015-02-03 23:57:18 +080011061 }
Ashok Raj15d45072018-02-01 22:59:43 +010011062
KarimAllah Ahmedd28b3872018-02-01 22:59:45 +010011063 if (spec_ctrl)
11064 nested_vmx_disable_intercept_for_msr(
11065 msr_bitmap_l1, msr_bitmap_l0,
11066 MSR_IA32_SPEC_CTRL,
11067 MSR_TYPE_R | MSR_TYPE_W);
11068
Ashok Raj15d45072018-02-01 22:59:43 +010011069 if (pred_cmd)
11070 nested_vmx_disable_intercept_for_msr(
11071 msr_bitmap_l1, msr_bitmap_l0,
11072 MSR_IA32_PRED_CMD,
11073 MSR_TYPE_W);
11074
Wincy Vanf2b93282015-02-03 23:56:03 +080011075 kunmap(page);
David Hildenbrand53a70da2017-08-03 18:11:05 +020011076 kvm_release_page_clean(page);
Wincy Vanf2b93282015-02-03 23:56:03 +080011077
11078 return true;
11079}
11080
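/*
 * Layout sketch for the index arithmetic in nested_vmx_prepare_msr_bitmap()
 * (per the SDM's 4-KByte MSR-bitmap format; byte offsets from the start):
 *
 *	0x000-0x3ff	read bitmap for low MSRs  (0x00000000-0x00001fff)
 *	0x400-0x7ff	read bitmap for high MSRs (0xc0000000-0xc0001fff)
 *	0x800-0xbff	write bitmap for low MSRs
 *	0xc00-0xfff	write bitmap for high MSRs
 *
 * An x2APIC MSR 0x800 <= msr <= 0x8ff is a "low" MSR, so its read bit
 * lives at word (msr / BITS_PER_LONG) and its write bit at the same word
 * plus 0x800 / sizeof(long) - exactly the offsets used in the loops above.
 */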
Krish Sadhukhanf0f4cf52018-04-11 01:10:16 -040011081static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
11082 struct vmcs12 *vmcs12)
11083{
11084 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
11085 !page_address_valid(vcpu, vmcs12->apic_access_addr))
11086 return -EINVAL;
11087 else
11088 return 0;
11089}
11090
Wincy Vanf2b93282015-02-03 23:56:03 +080011091static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
11092 struct vmcs12 *vmcs12)
11093{
Wincy Van82f0dd42015-02-03 23:57:18 +080011094 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
Wincy Van608406e2015-02-03 23:57:51 +080011095 !nested_cpu_has_apic_reg_virt(vmcs12) &&
Wincy Van705699a2015-02-03 23:58:17 +080011096 !nested_cpu_has_vid(vmcs12) &&
11097 !nested_cpu_has_posted_intr(vmcs12))
Wincy Vanf2b93282015-02-03 23:56:03 +080011098 return 0;
11099
11100 /*
11101 * If virtualize x2apic mode is enabled,
11102 * virtualize apic access must be disabled.
11103 */
Wincy Van82f0dd42015-02-03 23:57:18 +080011104 if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
11105 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
Wincy Vanf2b93282015-02-03 23:56:03 +080011106 return -EINVAL;
11107
Wincy Van608406e2015-02-03 23:57:51 +080011108 /*
11109 * If virtual interrupt delivery is enabled,
11110 * we must exit on external interrupts.
11111 */
11112 if (nested_cpu_has_vid(vmcs12) &&
11113 !nested_exit_on_intr(vcpu))
11114 return -EINVAL;
11115
Wincy Van705699a2015-02-03 23:58:17 +080011116 /*
11117	 * Bits 15:8 must be zero in posted_intr_nv;
11118	 * the descriptor address has already been checked
11119	 * in nested_get_vmcs12_pages.
11120 */
11121 if (nested_cpu_has_posted_intr(vmcs12) &&
11122 (!nested_cpu_has_vid(vmcs12) ||
11123 !nested_exit_intr_ack_set(vcpu) ||
11124 vmcs12->posted_intr_nv & 0xff00))
11125 return -EINVAL;
11126
Wincy Vanf2b93282015-02-03 23:56:03 +080011127	/* TPR shadow is required by all APICv features. */
11128 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
11129 return -EINVAL;
11130
11131 return 0;
Wincy Van3af18d92015-02-03 23:49:31 +080011132}
11133
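/*
 * Examples of inconsistent vmcs12 settings rejected above: enabling
 * virtualize-x2APIC mode together with virtualize-APIC-accesses fails
 * the first check; enabling virtual-interrupt delivery without exiting
 * on external interrupts fails the second.
 */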
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011134static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
11135 unsigned long count_field,
Eugene Korenevsky92d71bc2015-03-29 23:56:44 +030011136 unsigned long addr_field)
Wincy Vanff651cb2014-12-11 08:52:58 +030011137{
Eugene Korenevsky92d71bc2015-03-29 23:56:44 +030011138 int maxphyaddr;
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011139 u64 count, addr;
11140
11141 if (vmcs12_read_any(vcpu, count_field, &count) ||
11142 vmcs12_read_any(vcpu, addr_field, &addr)) {
11143 WARN_ON(1);
11144 return -EINVAL;
11145 }
11146 if (count == 0)
11147 return 0;
Eugene Korenevsky92d71bc2015-03-29 23:56:44 +030011148 maxphyaddr = cpuid_maxphyaddr(vcpu);
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011149 if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
11150 (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011151 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011152 "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
11153 addr_field, maxphyaddr, count, addr);
11154 return -EINVAL;
11155 }
11156 return 0;
11157}
11158
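/*
 * Example of the range check above, assuming
 * sizeof(struct vmx_msr_entry) == 16: with maxphyaddr == 36 (limit
 * 1 << 36 == 0x1000000000) and count == 2, addr == 0xffffffff0 is
 * 16-byte aligned, but addr + 2 * 16 - 1 == 0x100000000f has bit 36
 * set, so the list would spill past the physical-address width and
 * the check fails with -EINVAL.
 */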
11159static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
11160 struct vmcs12 *vmcs12)
11161{
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011162 if (vmcs12->vm_exit_msr_load_count == 0 &&
11163 vmcs12->vm_exit_msr_store_count == 0 &&
11164 vmcs12->vm_entry_msr_load_count == 0)
11165 return 0; /* Fast path */
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011166 if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
Eugene Korenevsky92d71bc2015-03-29 23:56:44 +030011167 VM_EXIT_MSR_LOAD_ADDR) ||
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011168 nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
Eugene Korenevsky92d71bc2015-03-29 23:56:44 +030011169 VM_EXIT_MSR_STORE_ADDR) ||
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011170 nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
Eugene Korenevsky92d71bc2015-03-29 23:56:44 +030011171 VM_ENTRY_MSR_LOAD_ADDR))
Wincy Vanff651cb2014-12-11 08:52:58 +030011172 return -EINVAL;
11173 return 0;
11174}
11175
Bandan Dasc5f983f2017-05-05 15:25:14 -040011176static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
11177 struct vmcs12 *vmcs12)
11178{
11179 u64 address = vmcs12->pml_address;
11180 int maxphyaddr = cpuid_maxphyaddr(vcpu);
11181
11182 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
11183 if (!nested_cpu_has_ept(vmcs12) ||
11184 !IS_ALIGNED(address, 4096) ||
11185 address >> maxphyaddr)
11186 return -EINVAL;
11187 }
11188
11189 return 0;
11190}
11191
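/*
 * Example for nested_vmx_check_pml_controls(): a vmcs12 that enables PML
 * without EPT is rejected, as is a pml_address of 0x1234 (not 4096-byte
 * aligned) or one at or above 1 << maxphyaddr.
 */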
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011192static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
11193 struct vmx_msr_entry *e)
11194{
11195 /* x2APIC MSR accesses are not allowed */
Jan Kiszka8a9781f2015-05-04 08:32:32 +020011196 if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011197 return -EINVAL;
11198 if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
11199 e->index == MSR_IA32_UCODE_REV)
11200 return -EINVAL;
11201 if (e->reserved != 0)
11202 return -EINVAL;
11203 return 0;
11204}
11205
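/*
 * Example of the x2APIC filter above: with X2APIC_ENABLE set in
 * vcpu->arch.apic_base, an entry for MSR 0x808 (x2APIC TPR) has
 * 0x808 >> 8 == 0x8 and is rejected, while MSR_EFER (0xc0000080) has
 * 0xc0000080 >> 8 == 0xc00000 and passes this particular check.
 */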
11206static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
11207 struct vmx_msr_entry *e)
Wincy Vanff651cb2014-12-11 08:52:58 +030011208{
11209 if (e->index == MSR_FS_BASE ||
11210 e->index == MSR_GS_BASE ||
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011211 e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
11212 nested_vmx_msr_check_common(vcpu, e))
11213 return -EINVAL;
11214 return 0;
11215}
11216
11217static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
11218 struct vmx_msr_entry *e)
11219{
11220 if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
11221 nested_vmx_msr_check_common(vcpu, e))
Wincy Vanff651cb2014-12-11 08:52:58 +030011222 return -EINVAL;
11223 return 0;
11224}
11225
11226/*
11227 * Load the guest's/host's MSRs at nested entry/exit.
11228 * Return 0 on success, or the 1-based index of the failing entry on failure.
11229 */
11230static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
11231{
11232 u32 i;
11233 struct vmx_msr_entry e;
11234 struct msr_data msr;
11235
11236 msr.host_initiated = false;
11237 for (i = 0; i < count; i++) {
Paolo Bonzini54bf36a2015-04-08 15:39:23 +020011238 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
11239 &e, sizeof(e))) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011240 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011241 "%s cannot read MSR entry (%u, 0x%08llx)\n",
11242 __func__, i, gpa + i * sizeof(e));
Wincy Vanff651cb2014-12-11 08:52:58 +030011243 goto fail;
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011244 }
11245 if (nested_vmx_load_msr_check(vcpu, &e)) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011246 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011247 "%s check failed (%u, 0x%x, 0x%x)\n",
11248 __func__, i, e.index, e.reserved);
11249 goto fail;
11250 }
Wincy Vanff651cb2014-12-11 08:52:58 +030011251 msr.index = e.index;
11252 msr.data = e.value;
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011253 if (kvm_set_msr(vcpu, &msr)) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011254 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011255 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
11256 __func__, i, e.index, e.value);
Wincy Vanff651cb2014-12-11 08:52:58 +030011257 goto fail;
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011258 }
Wincy Vanff651cb2014-12-11 08:52:58 +030011259 }
11260 return 0;
11261fail:
11262 return i + 1;
11263}
11264
11265static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
11266{
11267 u32 i;
11268 struct vmx_msr_entry e;
11269
11270 for (i = 0; i < count; i++) {
Paolo Bonzini609e36d2015-04-08 15:30:38 +020011271 struct msr_data msr_info;
Paolo Bonzini54bf36a2015-04-08 15:39:23 +020011272 if (kvm_vcpu_read_guest(vcpu,
11273 gpa + i * sizeof(e),
11274 &e, 2 * sizeof(u32))) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011275 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011276 "%s cannot read MSR entry (%u, 0x%08llx)\n",
11277 __func__, i, gpa + i * sizeof(e));
Wincy Vanff651cb2014-12-11 08:52:58 +030011278 return -EINVAL;
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011279 }
11280 if (nested_vmx_store_msr_check(vcpu, &e)) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011281 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011282 "%s check failed (%u, 0x%x, 0x%x)\n",
11283 __func__, i, e.index, e.reserved);
Wincy Vanff651cb2014-12-11 08:52:58 +030011284 return -EINVAL;
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011285 }
Paolo Bonzini609e36d2015-04-08 15:30:38 +020011286 msr_info.host_initiated = false;
11287 msr_info.index = e.index;
11288 if (kvm_get_msr(vcpu, &msr_info)) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011289 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011290 "%s cannot read MSR (%u, 0x%x)\n",
11291 __func__, i, e.index);
11292 return -EINVAL;
11293 }
Paolo Bonzini54bf36a2015-04-08 15:39:23 +020011294 if (kvm_vcpu_write_guest(vcpu,
11295 gpa + i * sizeof(e) +
11296 offsetof(struct vmx_msr_entry, value),
11297 &msr_info.data, sizeof(msr_info.data))) {
Paolo Bonzinibbe41b92016-08-19 17:51:20 +020011298 pr_debug_ratelimited(
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011299 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
Paolo Bonzini609e36d2015-04-08 15:30:38 +020011300 __func__, i, e.index, msr_info.data);
Eugene Korenevskye9ac0332014-12-11 08:53:27 +030011301 return -EINVAL;
11302 }
Wincy Vanff651cb2014-12-11 08:52:58 +030011303 }
11304 return 0;
11305}
11306
Ladi Prosek1dc35da2016-11-30 16:03:11 +010011307static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
11308{
11309 unsigned long invalid_mask;
11310
11311 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
11312 return (val & invalid_mask) == 0;
11313}
11314
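/*
 * Example of the mask in nested_cr3_valid(): with cpuid_maxphyaddr() == 36,
 *
 *	invalid_mask = ~0ULL << 36 = 0xfffffff000000000
 *
 * so val = 0x0000000012345000 passes ((val & invalid_mask) == 0), while
 * val = 0x0000010000000000 (bit 40 set) is rejected.
 */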
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011315/*
Ladi Prosek9ed38ffa2016-11-30 16:03:10 +010011316 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
11317 * emulating VM entry into a guest with EPT enabled.
11318 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
11319 * is assigned to entry_failure_code on failure.
11320 */
11321static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
Jim Mattsonca0bde22016-11-30 12:03:46 -080011322 u32 *entry_failure_code)
Ladi Prosek9ed38ffa2016-11-30 16:03:10 +010011323{
Ladi Prosek9ed38ffa2016-11-30 16:03:10 +010011324 if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
Ladi Prosek1dc35da2016-11-30 16:03:11 +010011325 if (!nested_cr3_valid(vcpu, cr3)) {
Ladi Prosek9ed38ffa2016-11-30 16:03:10 +010011326 *entry_failure_code = ENTRY_FAIL_DEFAULT;
11327 return 1;
11328 }
11329
11330 /*
11331 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
11332 * must not be dereferenced.
11333 */
11334 if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
11335 !nested_ept) {
11336 if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
11337 *entry_failure_code = ENTRY_FAIL_PDPTE;
11338 return 1;
11339 }
11340 }
11341
11342 vcpu->arch.cr3 = cr3;
11343 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
11344 }
11345
11346 kvm_mmu_reset_context(vcpu);
11347 return 0;
11348}
11349
Jim Mattson6514dc32018-04-26 16:09:12 -070011350static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
Paolo Bonzini74a497f2017-12-20 13:55:39 +010011351{
Paolo Bonzini8665c3f2017-12-20 13:56:53 +010011352 struct vcpu_vmx *vmx = to_vmx(vcpu);
11353
11354 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
11355 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
11356 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
11357 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
11358 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
11359 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
11360 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
11361 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
11362 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
11363 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
11364 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
11365 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
11366 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
11367 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
11368 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
11369 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
11370 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
11371 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
11372 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
11373 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
11374 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
11375 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
11376 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
11377 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
11378 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
11379 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
11380 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
11381 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
11382 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
11383 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
11384 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
Paolo Bonzini25a2e4f2017-12-20 14:05:21 +010011385
11386 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
11387 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
11388 vmcs12->guest_pending_dbg_exceptions);
11389 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
11390 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
11391
11392 if (nested_cpu_has_xsaves(vmcs12))
11393 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
11394 vmcs_write64(VMCS_LINK_POINTER, -1ull);
11395
11396 if (cpu_has_vmx_posted_intr())
11397 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
11398
11399 /*
11400 * Whether page-faults are trapped is determined by a combination of
11401 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
11402 * If enable_ept, L0 doesn't care about page faults and we should
11403 * set all of these to L1's desires. However, if !enable_ept, L0 does
11404 * care about (at least some) page faults, and because it is not easy
11405 * (if at all possible?) to merge L0 and L1's desires, we simply ask
11406 * to exit on each and every L2 page fault. This is done by setting
11407 * MASK=MATCH=0 and (see below) EB.PF=1.
11408 * Note that below we don't need special code to set EB.PF beyond the
11409 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
11410 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
11411 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
11412 */
11413 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
11414 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
11415 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
11416 enable_ept ? vmcs12->page_fault_error_code_match : 0);
11417
11418 /* All VMFUNCs are currently emulated through L0 vmexits. */
11419 if (cpu_has_vmx_vmfunc())
11420 vmcs_write64(VM_FUNCTION_CONTROL, 0);
11421
11422 if (cpu_has_vmx_apicv()) {
11423 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
11424 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
11425 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
11426 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
11427 }
11428
11429 /*
11430	 * Set host-state according to L0's settings (vmcs12 is irrelevant here).
11431 * Some constant fields are set here by vmx_set_constant_host_state().
11432 * Other fields are different per CPU, and will be set later when
11433 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
11434 */
11435 vmx_set_constant_host_state(vmx);
11436
11437 /*
11438 * Set the MSR load/store lists to match L0's settings.
11439 */
11440 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
Konrad Rzeszutek Wilk33966dd62018-06-20 13:58:37 -040011441 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
11442 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
11443 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
11444 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
Paolo Bonzini25a2e4f2017-12-20 14:05:21 +010011445
11446 set_cr4_guest_host_mask(vmx);
11447
11448 if (vmx_mpx_supported())
11449 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
11450
11451 if (enable_vpid) {
11452 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
11453 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
11454 else
11455 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
11456 }
11457
11458 /*
11459	 * L1 may access L2's PDPTRs, so save them to construct vmcs12.
11460 */
11461 if (enable_ept) {
11462 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
11463 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
11464 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
11465 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
11466 }
Radim Krčmář80132f42018-02-02 18:26:58 +010011467
11468 if (cpu_has_vmx_msr_bitmap())
11469 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
Paolo Bonzini74a497f2017-12-20 13:55:39 +010011470}
11471
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011472/*
11473 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
11474 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
Tiejun Chenb4619662014-09-22 10:31:38 +080011475 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011476 * guest in a way that will both be appropriate to L1's requests, and our
11477 * needs. In addition to modifying the active vmcs (which is vmcs02), this
11478 * function also has additional necessary side-effects, like setting various
11479 * vcpu->arch fields.
Ladi Prosekee146c12016-11-30 16:03:09 +010011480 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
11481 * is assigned to entry_failure_code on failure.
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011482 */
Ladi Prosekee146c12016-11-30 16:03:09 +010011483static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
Jim Mattson6514dc32018-04-26 16:09:12 -070011484 u32 *entry_failure_code)
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011485{
11486 struct vcpu_vmx *vmx = to_vmx(vcpu);
Bandan Das03efce62017-05-05 15:25:15 -040011487 u32 exec_control, vmcs12_exec_ctrl;
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011488
Sean Christopherson9d1887e2018-03-05 09:33:27 -080011489 if (vmx->nested.dirty_vmcs12) {
Jim Mattson6514dc32018-04-26 16:09:12 -070011490 prepare_vmcs02_full(vcpu, vmcs12);
Sean Christopherson9d1887e2018-03-05 09:33:27 -080011491 vmx->nested.dirty_vmcs12 = false;
11492 }
11493
Paolo Bonzini8665c3f2017-12-20 13:56:53 +010011494 /*
11495 * First, the fields that are shadowed. This must be kept in sync
11496 * with vmx_shadow_fields.h.
11497 */
11498
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011499 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011500 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011501 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011502 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
11503 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
Paolo Bonzini8665c3f2017-12-20 13:56:53 +010011504
11505 /*
11506 * Not in vmcs02: GUEST_PML_INDEX, HOST_FS_SELECTOR, HOST_GS_SELECTOR,
11507 * HOST_FS_BASE, HOST_GS_BASE.
11508 */
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011509
Jim Mattson6514dc32018-04-26 16:09:12 -070011510 if (vmx->nested.nested_run_pending &&
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011511 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
Jan Kiszka2996fca2014-06-16 13:59:43 +020011512 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
11513 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
11514 } else {
11515 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
11516 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
11517 }
Jim Mattson6514dc32018-04-26 16:09:12 -070011518 if (vmx->nested.nested_run_pending) {
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011519 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
11520 vmcs12->vm_entry_intr_info_field);
11521 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
11522 vmcs12->vm_entry_exception_error_code);
11523 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
11524 vmcs12->vm_entry_instruction_len);
11525 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
11526 vmcs12->guest_interruptibility_info);
Wanpeng Li2d6144e2017-07-25 03:40:46 -070011527 vmx->loaded_vmcs->nmi_known_unmasked =
11528 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011529 } else {
11530 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
11531 }
Gleb Natapov63fbf592013-07-28 18:31:06 +030011532 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011533
Jan Kiszkaf4124502014-03-07 20:03:13 +010011534 exec_control = vmcs12->pin_based_vm_exec_control;
Wincy Van705699a2015-02-03 23:58:17 +080011535
Paolo Bonzini93140062016-07-06 13:23:51 +020011536 /* Preemption timer setting is only taken from vmcs01. */
11537 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
11538 exec_control |= vmcs_config.pin_based_exec_ctrl;
11539 if (vmx->hv_deadline_tsc == -1)
11540 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
11541
11542 /* Posted interrupts setting is only taken from vmcs12. */
Wincy Van705699a2015-02-03 23:58:17 +080011543 if (nested_cpu_has_posted_intr(vmcs12)) {
Wincy Van705699a2015-02-03 23:58:17 +080011544 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
11545 vmx->nested.pi_pending = false;
Jim Mattson6beb7bd2016-11-30 12:03:45 -080011546 } else {
Wincy Van705699a2015-02-03 23:58:17 +080011547 exec_control &= ~PIN_BASED_POSTED_INTR;
Jim Mattson6beb7bd2016-11-30 12:03:45 -080011548 }
Wincy Van705699a2015-02-03 23:58:17 +080011549
Jan Kiszkaf4124502014-03-07 20:03:13 +010011550 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011551
Jan Kiszkaf4124502014-03-07 20:03:13 +010011552 vmx->nested.preemption_timer_expired = false;
11553 if (nested_cpu_has_preemption_timer(vmcs12))
11554 vmx_start_preemption_timer(vcpu);
Jan Kiszka0238ea92013-03-13 11:31:24 +010011555
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011556 if (cpu_has_secondary_exec_ctrls()) {
Paolo Bonzini80154d72017-08-24 13:55:35 +020011557 exec_control = vmx->secondary_exec_control;
Xiao Guangronge2821622015-09-09 14:05:52 +080011558
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011559 /* Take the following fields only from vmcs12 */
Paolo Bonzini696dfd92014-05-07 11:20:54 +020011560 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
Paolo Bonzini90a2db62017-07-27 13:22:13 +020011561 SECONDARY_EXEC_ENABLE_INVPCID |
Jan Kiszkab3a2a902015-03-23 19:27:19 +010011562 SECONDARY_EXEC_RDTSCP |
Paolo Bonzini3db13482017-08-24 14:48:03 +020011563 SECONDARY_EXEC_XSAVES |
Paolo Bonzini696dfd92014-05-07 11:20:54 +020011564 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
Bandan Das27c42a12017-08-03 15:54:42 -040011565 SECONDARY_EXEC_APIC_REGISTER_VIRT |
11566 SECONDARY_EXEC_ENABLE_VMFUNC);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011567 if (nested_cpu_has(vmcs12,
Bandan Das03efce62017-05-05 15:25:15 -040011568 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
11569 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
11570 ~SECONDARY_EXEC_ENABLE_PML;
11571 exec_control |= vmcs12_exec_ctrl;
11572 }
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011573
Paolo Bonzini25a2e4f2017-12-20 14:05:21 +010011574 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
Wincy Van608406e2015-02-03 23:57:51 +080011575 vmcs_write16(GUEST_INTR_STATUS,
11576 vmcs12->guest_intr_status);
Wincy Van608406e2015-02-03 23:57:51 +080011577
Jim Mattson6beb7bd2016-11-30 12:03:45 -080011578 /*
11579 * Write an illegal value to APIC_ACCESS_ADDR. Later,
11580 * nested_get_vmcs12_pages will either fix it up or
11581 * remove the VM execution control.
11582 */
11583 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
11584 vmcs_write64(APIC_ACCESS_ADDR, -1ull);
11585
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011586 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
11587 }
11588
Jim Mattson83bafef2016-10-04 10:48:38 -070011589 /*
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011590 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
11591 * entry, but only if the current (host) sp changed from the value
11592 * we wrote last (vmx->host_rsp). This cache is no longer relevant
11593 * if we switch vmcs, and rather than hold a separate cache per vmcs,
11594 * here we just force the write to happen on entry.
11595 */
11596 vmx->host_rsp = 0;
11597
11598 exec_control = vmx_exec_control(vmx); /* L0's desires */
11599 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
11600 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
11601 exec_control &= ~CPU_BASED_TPR_SHADOW;
11602 exec_control |= vmcs12->cpu_based_vm_exec_control;
Wanpeng Lia7c0b072014-08-21 19:46:50 +080011603
Jim Mattson6beb7bd2016-11-30 12:03:45 -080011604 /*
11605 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
11606 * nested_get_vmcs12_pages can't fix it up, the illegal value
11607 * will result in a VM entry failure.
11608 */
Wanpeng Lia7c0b072014-08-21 19:46:50 +080011609 if (exec_control & CPU_BASED_TPR_SHADOW) {
Jim Mattson6beb7bd2016-11-30 12:03:45 -080011610 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
Wanpeng Lia7c0b072014-08-21 19:46:50 +080011611 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
Jim Mattson51aa68e2017-09-12 13:02:54 -070011612 } else {
11613#ifdef CONFIG_X86_64
11614 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
11615 CPU_BASED_CR8_STORE_EXITING;
11616#endif
Wanpeng Lia7c0b072014-08-21 19:46:50 +080011617 }
11618
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011619 /*
Quan Xu8eb73e22017-12-12 16:44:21 +080011620	 * A vmexit (to either the L1 hypervisor or L0 userspace) is always needed
11621 * for I/O port accesses.
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011622 */
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011623 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
11624 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
11625
11626 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
11627
11628 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
11629 * bitwise-or of what L1 wants to trap for L2, and what we want to
11630 * trap. Note that CR0.TS also needs updating - we do this later.
11631 */
11632 update_exception_bitmap(vcpu);
11633 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
11634 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
11635
Nadav Har'El8049d652013-08-05 11:07:06 +030011636 /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
11637 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
11638 * bits are further modified by vmx_set_efer() below.
11639 */
Jan Kiszkaf4124502014-03-07 20:03:13 +010011640 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
Nadav Har'El8049d652013-08-05 11:07:06 +030011641
11642 /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
11643 * emulated by vmx_set_efer(), below.
11644 */
Gleb Natapov2961e8762013-11-25 15:37:13 +020011645 vm_entry_controls_init(vmx,
Nadav Har'El8049d652013-08-05 11:07:06 +030011646 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
11647 ~VM_ENTRY_IA32E_MODE) |
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011648 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
11649
Jim Mattson6514dc32018-04-26 16:09:12 -070011650 if (vmx->nested.nested_run_pending &&
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011651 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011652 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
Jan Kiszka44811c02013-08-04 17:17:27 +020011653 vcpu->arch.pat = vmcs12->guest_ia32_pat;
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011654 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011655 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011656 }
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011657
KarimAllah Ahmede79f2452018-04-14 05:10:52 +020011658 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
11659
Peter Feinerc95ba922016-08-17 09:36:47 -070011660 if (kvm_has_tsc_control)
11661 decache_tsc_multiplier(vmx);
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011662
11663 if (enable_vpid) {
11664 /*
Wanpeng Li5c614b32015-10-13 09:18:36 -070011665		 * There is no direct mapping between vpid02 and vpid12: the
11666		 * vpid02 is per-vCPU for L0 and is reused, while the value of
11667		 * vpid12 is changed with one INVVPID during nested vmentry.
11668		 * The vpid12 is allocated by L1 for L2, so it will not
11669		 * influence the global bitmap (for vpid01 and vpid02 allocation)
11670		 * even if L1 spawns a lot of nested vCPUs.
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011671 */
Wanpeng Li5c614b32015-10-13 09:18:36 -070011672 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
Wanpeng Li5c614b32015-10-13 09:18:36 -070011673 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
11674 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
Liran Alon6bce30c2018-05-22 17:16:12 +030011675 __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
Wanpeng Li5c614b32015-10-13 09:18:36 -070011676 }
11677 } else {
Wanpeng Lic2ba05c2017-12-12 17:33:03 -080011678 vmx_flush_tlb(vcpu, true);
Wanpeng Li5c614b32015-10-13 09:18:36 -070011679 }
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011680 }
11681
Ladi Prosek1fb883b2017-04-04 14:18:53 +020011682 if (enable_pml) {
11683 /*
11684 * Conceptually we want to copy the PML address and index from
11685 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
11686 * since we always flush the log on each vmexit, this happens
11687 * to be equivalent to simply resetting the fields in vmcs02.
11688 */
11689 ASSERT(vmx->pml_pg);
11690 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
11691 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
11692 }
11693
Nadav Har'El155a97a2013-08-05 11:07:16 +030011694 if (nested_cpu_has_ept(vmcs12)) {
Paolo Bonziniae1e2d12017-03-30 11:55:30 +020011695 if (nested_ept_init_mmu_context(vcpu)) {
11696 *entry_failure_code = ENTRY_FAIL_DEFAULT;
11697 return 1;
11698 }
Jim Mattsonfb6c8192017-03-16 13:53:59 -070011699 } else if (nested_cpu_has2(vmcs12,
11700 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
Junaid Shahida468f2d2018-04-26 13:09:50 -070011701 vmx_flush_tlb(vcpu, true);
Nadav Har'El155a97a2013-08-05 11:07:16 +030011702 }
11703
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011704 /*
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -080011705 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
11706 * bits which we consider mandatory enabled.
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011707 * The CR0_READ_SHADOW is what L2 should have expected to read given
11708	 * the specifications by L1; it's not enough to take
11709	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we may
11710	 * have more bits than L1 expected.
11711 */
11712 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
11713 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
11714
11715 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
11716 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
11717
Jim Mattson6514dc32018-04-26 16:09:12 -070011718 if (vmx->nested.nested_run_pending &&
Jim Mattsoncf8b84f2016-11-30 12:03:42 -080011719 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
David Matlack5a6a9742016-11-29 18:14:10 -080011720 vcpu->arch.efer = vmcs12->guest_ia32_efer;
11721 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
11722 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
11723 else
11724 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
11725 /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
11726 vmx_set_efer(vcpu, vcpu->arch.efer);
11727
Sean Christopherson2bb8caf2018-03-12 10:56:13 -070011728 /*
11729	 * If guest state is invalid and unrestricted guest is disabled,
11730	 * then L1 attempted VMEntry to L2 with invalid state.
11731 * Fail the VMEntry.
11732 */
Paolo Bonzini3184a992018-03-21 14:20:18 +010011733 if (vmx->emulation_required) {
11734 *entry_failure_code = ENTRY_FAIL_DEFAULT;
Sean Christopherson2bb8caf2018-03-12 10:56:13 -070011735 return 1;
Paolo Bonzini3184a992018-03-21 14:20:18 +010011736 }
Sean Christopherson2bb8caf2018-03-12 10:56:13 -070011737
Ladi Prosek9ed38ffa2016-11-30 16:03:10 +010011738	/* Load guest_cr3, shadowed by either EPT or shadow page tables. */
Ladi Prosek7ad658b2017-03-23 07:18:08 +010011739 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
Ladi Prosek9ed38ffa2016-11-30 16:03:10 +010011740 entry_failure_code))
11741 return 1;
Ladi Prosek7ca29de2016-11-30 16:03:08 +010011742
Gleb Natapovfeaf0c7d2013-09-25 12:51:36 +030011743 if (!enable_ept)
11744 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
11745
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011746 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
11747 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
Ladi Prosekee146c12016-11-30 16:03:09 +010011748 return 0;
Nadav Har'Elfe3ef052011-05-25 23:10:02 +030011749}
11750
Krish Sadhukhan0c7f6502018-02-20 21:24:39 -050011751static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
11752{
11753 if (!nested_cpu_has_nmi_exiting(vmcs12) &&
11754 nested_cpu_has_virtual_nmis(vmcs12))
11755 return -EINVAL;
11756
11757 if (!nested_cpu_has_virtual_nmis(vmcs12) &&
11758 nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
11759 return -EINVAL;
11760
11761 return 0;
11762}
11763
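/*
 * Consistency sketch for nested_vmx_check_nmi_controls(): per the SDM,
 * "virtual NMIs" may be 1 only if "NMI exiting" is 1, and "NMI-window
 * exiting" (CPU_BASED_VIRTUAL_NMI_PENDING) may be 1 only if "virtual
 * NMIs" is 1. E.g. a vmcs12 with NMI exiting clear but virtual NMIs set
 * fails the first check above.
 */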
Jim Mattsonca0bde22016-11-30 12:03:46 -080011764static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
11765{
11766 struct vcpu_vmx *vmx = to_vmx(vcpu);
11767
11768 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
11769 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
11770 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11771
Jim Mattson56a20512017-07-06 16:33:06 -070011772 if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
11773 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11774
Jim Mattsonca0bde22016-11-30 12:03:46 -080011775 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
11776 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11777
Krish Sadhukhanf0f4cf52018-04-11 01:10:16 -040011778 if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
11779 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11780
Jim Mattson712b12d2017-08-24 13:24:47 -070011781 if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
11782 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11783
Jim Mattsonca0bde22016-11-30 12:03:46 -080011784 if (nested_vmx_check_apicv_controls(vcpu, vmcs12))
11785 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11786
11787 if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
11788 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11789
Bandan Dasc5f983f2017-05-05 15:25:14 -040011790 if (nested_vmx_check_pml_controls(vcpu, vmcs12))
11791 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11792
Jim Mattsonca0bde22016-11-30 12:03:46 -080011793 if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010011794 vmx->nested.msrs.procbased_ctls_low,
11795 vmx->nested.msrs.procbased_ctls_high) ||
Jim Mattson2e5b0bd2017-05-04 11:51:58 -070011796 (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
11797 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010011798 vmx->nested.msrs.secondary_ctls_low,
11799 vmx->nested.msrs.secondary_ctls_high)) ||
Jim Mattsonca0bde22016-11-30 12:03:46 -080011800 !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010011801 vmx->nested.msrs.pinbased_ctls_low,
11802 vmx->nested.msrs.pinbased_ctls_high) ||
Jim Mattsonca0bde22016-11-30 12:03:46 -080011803 !vmx_control_verify(vmcs12->vm_exit_controls,
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010011804 vmx->nested.msrs.exit_ctls_low,
11805 vmx->nested.msrs.exit_ctls_high) ||
Jim Mattsonca0bde22016-11-30 12:03:46 -080011806 !vmx_control_verify(vmcs12->vm_entry_controls,
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010011807 vmx->nested.msrs.entry_ctls_low,
11808 vmx->nested.msrs.entry_ctls_high))
Jim Mattsonca0bde22016-11-30 12:03:46 -080011809 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11810
Krish Sadhukhan0c7f6502018-02-20 21:24:39 -050011811 if (nested_vmx_check_nmi_controls(vmcs12))
Jim Mattsonca0bde22016-11-30 12:03:46 -080011812 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11813
Bandan Das41ab9372017-08-03 15:54:43 -040011814 if (nested_cpu_has_vmfunc(vmcs12)) {
11815 if (vmcs12->vm_function_control &
Paolo Bonzini6677f3d2018-02-26 13:40:08 +010011816 ~vmx->nested.msrs.vmfunc_controls)
Bandan Das41ab9372017-08-03 15:54:43 -040011817 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11818
11819 if (nested_cpu_has_eptp_switching(vmcs12)) {
11820 if (!nested_cpu_has_ept(vmcs12) ||
11821 !page_address_valid(vcpu, vmcs12->eptp_list_address))
11822 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11823 }
11824 }
Bandan Das27c42a12017-08-03 15:54:42 -040011825
Jim Mattsonc7c2c702017-05-05 11:28:09 -070011826 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
11827 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
11828
Jim Mattsonca0bde22016-11-30 12:03:46 -080011829 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
11830 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
11831 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
11832 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
11833
11834 return 0;
11835}
11836
11837static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
11838 u32 *exit_qual)
11839{
11840 bool ia32e;
11841
11842 *exit_qual = ENTRY_FAIL_DEFAULT;
11843
11844 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
11845 !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
11846 return 1;
11847
11848 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS) &&
11849 vmcs12->vmcs_link_pointer != -1ull) {
11850 *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
11851 return 1;
11852 }
11853
11854 /*
11855 * If the load IA32_EFER VM-entry control is 1, the following checks
11856 * are performed on the field for the IA32_EFER MSR:
11857 * - Bits reserved in the IA32_EFER MSR must be 0.
11858 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
11859 * the IA-32e mode guest VM-exit control. It must also be identical
11860 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
11861 * CR0.PG) is 1.
11862 */
11863 if (to_vmx(vcpu)->nested.nested_run_pending &&
11864 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
11865 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
11866 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
11867 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
11868 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
11869 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
11870 return 1;
11871 }
11872
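	/*
	 * Example for the IA32_EFER check above: with VM_ENTRY_IA32E_MODE
	 * set, vmcs12->guest_ia32_efer must have EFER.LMA set (and EFER.LME,
	 * if CR0.PG is set); guest_ia32_efer == 0 fails because
	 * ia32e != !!(efer & EFER_LMA).
	 */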
11873 /*
11874 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
11875 * IA32_EFER MSR must be 0 in the field for that register. In addition,
11876 * the values of the LMA and LME bits in the field must each be that of
11877 * the host address-space size VM-exit control.
11878 */
11879 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
11880 ia32e = (vmcs12->vm_exit_controls &
11881 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
11882 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
11883 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
11884 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
11885 return 1;
11886 }
11887
Wanpeng Lif1b026a2017-11-05 16:54:48 -080011888 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
11889 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
11890 (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
11891 return 1;
11892
Jim Mattsonca0bde22016-11-30 12:03:46 -080011893 return 0;
11894}
11895
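/*
 * Transition the vCPU from L1 to L2 (VMX non-root operation): switch to
 * vmcs02, apply L1's TSC offset, build the vmcs02 state from vmcs12 and
 * process the VM-entry MSR-load area.  Returns 0 on success with vmcs02
 * active; on failure, switches back to vmcs01, reports the entry failure
 * to L1 via nested_vmx_entry_failure() and returns 1.
 */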
static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 msr_entry_idx;
	u32 exit_qual;
	int r;

	enter_guest_mode(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
	vmx_segment_cache_clear(vmx);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

	r = EXIT_REASON_INVALID_STATE;
	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
		goto fail;

	nested_get_vmcs12_pages(vcpu, vmcs12);

	r = EXIT_REASON_MSR_LOAD_FAIL;
	msr_entry_idx = nested_vmx_load_msr(vcpu,
					    vmcs12->vm_entry_msr_load_addr,
					    vmcs12->vm_entry_msr_load_count);
	if (msr_entry_idx)
		goto fail;

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 0;

fail:
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
	leave_guest_mode(vcpu);
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
	return 1;
}

/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
	u32 exit_qual;
	int ret;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (!nested_vmx_check_vmcs12(vcpu))
		goto out;

	vmcs12 = get_vmcs12(vcpu);

	if (enable_shadow_vmcs)
		copy_shadow_to_vmcs12(vmx);

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acts appropriately when
	 * they fail: as the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
		nested_vmx_failValid(vcpu,
				     VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
		goto out;
	}

	if (vmcs12->launch_state == launch) {
		nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
		goto out;
	}

	ret = check_vmentry_prereqs(vcpu, vmcs12);
	if (ret) {
		nested_vmx_failValid(vcpu, ret);
		goto out;
	}

	/*
	 * After this point, the trap flag no longer triggers a singlestep trap
	 * on the vm entry instructions; don't call kvm_skip_emulated_instruction.
	 * This is not 100% correct; for performance reasons, we delegate most
	 * of the checks on host state to the processor. If those fail,
	 * the singlestep trap is missed.
	 */
	skip_emulated_instruction(vcpu);

	ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
	if (ret) {
		nested_vmx_entry_failure(vcpu, vmcs12,
					 EXIT_REASON_INVALID_STATE, exit_qual);
		return 1;
	}

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */

	vmx->nested.nested_run_pending = 1;
	ret = enter_vmx_non_root_mode(vcpu);
	if (ret) {
		vmx->nested.nested_run_pending = 0;
		return ret;
	}

	/* Hide L1D cache contents from the nested guest. */
	vmx->vcpu.arch.l1tf_flush_l1d = true;

	/*
	 * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
	 * by event injection, halt vcpu.
	 */
	if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
		vmx->nested.nested_run_pending = 0;
		return kvm_vcpu_halt(vcpu);
	}
	return 1;

out:
	return kvm_skip_emulated_instruction(vcpu);
}

/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *    available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *    didn't trap the bit, because if L1 did, so would L0).
 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *    been modified by L2, and L1 knows it. So just leave the old value of
 *    the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *    isn't relevant, because if L0 traps this bit it can set it to anything.
 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *    changed these bits, and therefore they need to be updated, but L0
 *    didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *    put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}

static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}

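/*
 * Transfer an event (exception, NMI or interrupt) that L0 had already
 * injected into L2 to vmcs12's IDT-vectoring information field, so that
 * L1 sees it as an event that was being delivered when the VM exit
 * occurred.
 */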
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_injected) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}

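/*
 * Decide whether a pending event (exception, VMX-preemption timer, NMI or
 * external interrupt, checked in that order) should cause a VM exit from
 * L2 to L1, and perform the exit when it should.  Returns -EBUSY when the
 * exit must be postponed because a nested entry or event reinjection is
 * still in flight, 0 otherwise.
 */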
static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qual;
	bool block_nested_events =
	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);

	if (vcpu->arch.exception.pending &&
	    nested_vmx_check_exception(vcpu, &exit_qual)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
	    vmx->nested.preemption_timer_expired) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}

	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
		vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}

	if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
	    nested_exit_on_intr(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

	vmx_complete_nested_posted_interrupt(vcpu);
	return 0;
}

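/*
 * Convert the time remaining on the emulation hrtimer back into VMX
 * preemption-timer units for L1: remaining_ns * virtual_tsc_khz / 10^6
 * is the remaining time in guest TSC ticks, and the emulated timer counts
 * down once every 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE of those
 * ticks, hence the final right shift.
 */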
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}

/*
 * Update the guest state fields of vmcs12 to reflect changes that
 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
 * VM-entry controls is also updated, since this is really a guest
 * state bit.)
 */
static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

	if (nested_cpu_has_preemption_timer(vmcs12)) {
		if (vmcs12->vm_exit_controls &
		    VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
			vmcs12->vmx_preemption_timer_value =
				vmx_get_preemption_timer_value(vcpu);
		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
	}

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
		vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
		vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
		vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	}

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have been changed by the L2 guest or the exit - i.e., the guest-state
 * and exit-information fields only. Other fields are modified by L1 with
 * VMWRITE, which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update guest state fields: */
	sync_vmcs12(vcpu, vmcs12);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;
	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

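/*
 * Restore L1's MMU state on a nested VM exit: drop the nested EPT MMU
 * context and reload CR3 from vmcs12's host-state area.  host_cr3 was
 * validated at VM entry, so only the PDPTE load can fail here, and that
 * failure is a VMX abort.
 */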
static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	u32 entry_failure_code;

	nested_ept_uninit_mmu_context(vcpu);

	/*
	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
	 * couldn't have changed.
	 */
	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
}

/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	struct kvm_segment seg;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to efer set above.
	 *
	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it);
	 */
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmx_set_cr0(vcpu, vmcs12->host_cr0);

	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs12->host_cr4);

	load_vmcs12_mmu_host_state(vcpu, vmcs12);

	/*
	 * If vmcs01 doesn't use VPID, the CPU flushes the TLB on every
	 * VMEntry/VMExit. Thus, no need to flush TLB.
	 *
	 * If vmcs12 uses VPID, TLB entries populated by L2 are
	 * tagged with vmx->nested.vpid02 while L1 entries are tagged
	 * with vmx->vpid. Thus, no need to flush TLB.
	 *
	 * Therefore, flush TLB only in case vmcs01 uses VPID and
	 * vmcs12 doesn't use VPID, as in this case L1 & L2 TLB entries
	 * are both tagged with vmx->vpid.
	 */
	if (enable_vpid &&
	    !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) {
		vmx_flush_tlb(vcpu, true);
	}

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);

	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, 0);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
			     vmcs12->host_ia32_perf_global_ctrl);

	/* Set L1 segment info according to Intel SDM
	   27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
				vmcs12->vm_exit_msr_load_count))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}

/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
 */
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
			      u32 exit_intr_info,
			      unsigned long exit_qualification)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	/*
	 * The only expected VM-instruction error is "VM entry with
	 * invalid control field(s)." Anything else indicates a
	 * problem with L0.
	 */
	WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) !=
				   VMXERR_ENTRY_INVALID_CONTROL_FIELD));

	leave_guest_mode(vcpu);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;

	if (likely(!vmx->fail)) {
		if (exit_reason == -1)
			sync_vmcs12(vcpu, vmcs12);
		else
			prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
				       exit_qualification);

		if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);

	/* Update any VMCS fields that might have changed while L2 ran */
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (vmx->hv_deadline_tsc == -1)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
				PIN_BASED_VMX_PREEMPTION_TIMER);
	else
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
		vmx->nested.change_vmcs01_virtual_apic_mode = false;
		vmx_set_virtual_apic_mode(vcpu);
	} else if (!nested_cpu_has_ept(vmcs12) &&
		   nested_cpu_has2(vmcs12,
				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		vmx_flush_tlb(vcpu, true);
	}

	/* This is needed for same reason as it was needed in prepare_vmcs02 */
	vmx->host_rsp = 0;

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	if (vmx->nested.virtual_apic_page) {
		kvm_release_page_dirty(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page = NULL;
	}
	if (vmx->nested.pi_desc_page) {
		kunmap(vmx->nested.pi_desc_page);
		kvm_release_page_dirty(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc_page = NULL;
		vmx->nested.pi_desc = NULL;
	}

	/*
	 * While L2 was running, the mmu_notifier may have forced us to
	 * reload the page's hpa for the L2 vmcs. We need to reload it for
	 * L1 before entering L1.
	 */
	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	if (enable_shadow_vmcs && exit_reason != -1)
		vmx->nested.sync_shadow_vmcs = true;

	/* in case we halted in L2 */
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	if (likely(!vmx->fail)) {
		/*
		 * TODO: SDM says that with acknowledge interrupt on
		 * exit, bit 31 of the VM-exit interrupt information
		 * (valid interrupt) is always set to 1 on
		 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
		 * need kvm_cpu_has_interrupt(). See the commit
		 * message for details.
		 */
		if (nested_exit_intr_ack_set(vcpu) &&
		    exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
		    kvm_cpu_has_interrupt(vcpu)) {
			int irq = kvm_cpu_get_interrupt(vcpu);
			WARN_ON(irq < 0);
			vmcs12->vm_exit_intr_info = irq |
				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
		}

		if (exit_reason != -1)
			trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
						       vmcs12->exit_qualification,
						       vmcs12->idt_vectoring_info_field,
						       vmcs12->vm_exit_intr_info,
						       vmcs12->vm_exit_intr_error_code,
						       KVM_ISA_VMX);

		load_vmcs12_host_state(vcpu, vmcs12);

		return;
	}

	/*
	 * After an early L2 VM-entry failure, we're now back
	 * in L1 which thinks it just finished a VMLAUNCH or
	 * VMRESUME instruction, so we need to set the failure
	 * flag and the VM-instruction error field of the VMCS
	 * accordingly.
	 */
	nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	load_vmcs12_mmu_host_state(vcpu, vmcs12);

	/*
	 * The emulated instruction was already skipped in
	 * nested_vmx_run, but the updated RIP was never
	 * written back to the vmcs01.
	 */
	skip_emulated_instruction(vcpu);
	vmx->fail = 0;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.nested_run_pending = 0;
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	}
	free_nested(to_vmx(vcpu));
}

/*
 * L1's failure to enter L2 is a subset of a normal exit, as explained in
 * 23.7 "VM-entry failures during or after loading guest state" (this also
 * lists the acceptable exit-reason and exit-qualification parameters).
 * It should only be called before L2 has actually started to run, and when
 * vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
 */
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12,
				     u32 reason, unsigned long qualification)
{
	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = qualification;
	nested_vmx_succeed(vcpu);
	if (enable_shadow_vmcs)
		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}

static int vmx_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

	/*
	 * RDPID causes #UD if disabled through secondary execution controls.
	 * Because it is marked as EmulateOnUD, we need to intercept it here.
	 */
	if (info->intercept == x86_intercept_rdtscp &&
	    !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
		ctxt->exception.vector = UD_VECTOR;
		ctxt->exception.error_code_valid = false;
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* TODO: check more intercepts... */
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
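/*
 * A worked example of the helper below (illustrative values only): with
 * a = 0x1234, shift = 32 and divisor = 1000, the 128-bit dividend
 * 0x1234 << 32 is held in high:low.  high = a >> (64 - shift) = 0, which
 * is < divisor, so the quotient fits in 64 bits and divq cannot fault.
 */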
/* (a << shift) / divisor; return 1 if overflow, otherwise 0 */
static inline int u64_shl_div_u64(u64 a, unsigned int shift,
				  u64 divisor, u64 *result)
{
	u64 low = a << shift, high = a >> (64 - shift);

	/* To avoid the overflow on divq */
	if (high >= divisor)
		return 1;

	/* Low holds the result, high holds the remainder, which is discarded */
	asm("divq %2\n\t" : "=a" (low), "=d" (high) :
	    "rm" (divisor), "0" (low), "1" (high));
	*result = low;

	return 0;
}

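/*
 * Program the VMX preemption timer to fire at the guest's LAPIC timer
 * deadline.  Returns 0 when the timer was armed, 1 when the deadline has
 * effectively passed (delta_tsc == 0), -EOPNOTSUPP when MWAIT is exposed
 * to the guest, and -ERANGE when the delta cannot be represented in the
 * timer's units, in which case the caller presumably falls back to the
 * hrtimer-based emulation.
 */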
static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
	struct vcpu_vmx *vmx;
	u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;

	if (kvm_mwait_in_guest(vcpu->kvm))
		return -EOPNOTSUPP;

	vmx = to_vmx(vcpu);
	tscl = rdtsc();
	guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
	delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
	lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);

	if (delta_tsc > lapic_timer_advance_cycles)
		delta_tsc -= lapic_timer_advance_cycles;
	else
		delta_tsc = 0;

	/* Convert to host delta tsc if tsc scaling is enabled */
	if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
	    u64_shl_div_u64(delta_tsc,
			    kvm_tsc_scaling_ratio_frac_bits,
			    vcpu->arch.tsc_scaling_ratio,
			    &delta_tsc))
		return -ERANGE;

	/*
	 * If the delta tsc can't fit in 32 bits after the multiply and shift,
	 * we can't use the preemption timer.
	 * It's possible that it fits on later vmentries, but checking
	 * on every vmentry is costly so we just use an hrtimer.
	 */
	if (delta_tsc >> (cpu_preemption_timer_multi + 32))
		return -ERANGE;

	vmx->hv_deadline_tsc = tscl + delta_tsc;
	vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			PIN_BASED_VMX_PREEMPTION_TIMER);

	return delta_tsc == 0;
}

static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	vmx->hv_deadline_tsc = -1;
	vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
			PIN_BASED_VMX_PREEMPTION_TIMER);
}
#endif

static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
	if (!kvm_pause_in_guest(vcpu->kvm))
		shrink_ple_window(vcpu);
}

static void vmx_slot_enable_log_dirty(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
}

static void vmx_slot_disable_log_dirty(struct kvm *kvm,
				       struct kvm_memory_slot *slot)
{
	kvm_mmu_slot_set_dirty(kvm, slot);
}

static void vmx_flush_log_dirty(struct kvm *kvm)
{
	kvm_flush_pml_buffers(kvm);
}

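/*
 * Emulate PML on behalf of an L2 guest: when L1 enabled PML for L2, log
 * the dirtied GPA into vmcs12's PML buffer and decrement the guest's PML
 * index.  Returns 1 when that buffer is full (guest_pml_index out of
 * range), presumably so the caller can reflect a PML-full exit to L1;
 * returns 0 otherwise.
 */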
static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t gpa;
	struct page *page = NULL;
	u64 *pml_address;

	if (is_guest_mode(vcpu)) {
		WARN_ON_ONCE(vmx->nested.pml_full);

		/*
		 * Check if PML is enabled for the nested guest.
		 * Whether eptp bit 6 is set is already checked
		 * as part of A/D emulation.
		 */
		vmcs12 = get_vmcs12(vcpu);
		if (!nested_cpu_has_pml(vmcs12))
			return 0;

		if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
			vmx->nested.pml_full = true;
			return 1;
		}

		gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;

		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
		if (is_error_page(page))
			return 0;

		pml_address = kmap(page);
		pml_address[vmcs12->guest_pml_index--] = gpa;
		kunmap(page);
		kvm_release_page_clean(page);
	}

	return 0;
}

static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
					   struct kvm_memory_slot *memslot,
					   gfn_t offset, unsigned long mask)
{
	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}

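/*
 * Undo pi_pre_block(): with interrupts disabled, point the posted-
 * interrupt descriptor's destination back at the CPU the vCPU runs on,
 * restore the normal notification vector and take the vCPU off the
 * per-CPU wakeup list.
 */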
static void __pi_post_block(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	do {
		old.control = new.control = pi_desc->control;
		WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
		     "Wakeup handler not enabled while the VCPU is blocked\n");

		dest = cpu_physical_id(vcpu->cpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		/* set 'NV' to 'notification vector' */
		new.nv = POSTED_INTR_VECTOR;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		list_del(&vcpu->blocked_vcpu_list);
		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		vcpu->pre_pcpu = -1;
	}
}

/*
 * This routine does the following things for a vCPU which is going
 * to be blocked if VT-d PI is enabled.
 * - Store the vCPU on the wakeup list, so that when interrupts happen
 *   we can find the right vCPU to wake up.
 * - Change the Posted-interrupt descriptor as below:
 *      'NDST' <-- vcpu->pre_pcpu
 *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
 * - If 'ON' is set during this process, which means at least one
 *   interrupt is posted for this vCPU, we cannot block it; in
 *   this case, return 1, otherwise, return 0.
 *
 */
static int pi_pre_block(struct kvm_vcpu *vcpu)
{
	unsigned int dest;
	struct pi_desc old, new;
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(vcpu))
		return 0;

	WARN_ON(irqs_disabled());
	local_irq_disable();
	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
		vcpu->pre_pcpu = vcpu->cpu;
		spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		list_add_tail(&vcpu->blocked_vcpu_list,
			      &per_cpu(blocked_vcpu_on_cpu,
				       vcpu->pre_pcpu));
		spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
	}

	do {
		old.control = new.control = pi_desc->control;

		WARN((pi_desc->sn == 1),
		     "Warning: SN field of posted-interrupts "
		     "is set before blocking\n");

		/*
		 * Since the vCPU can be preempted during this process,
		 * vcpu->cpu could be different from pre_pcpu, so we
		 * need to set pre_pcpu as the destination of the wakeup
		 * notification event; then we can find the right vCPU
		 * to wake up in the wakeup handler if interrupts happen
		 * while the vCPU is in blocked state.
		 */
		dest = cpu_physical_id(vcpu->pre_pcpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		/* set 'NV' to 'wakeup vector' */
		new.nv = POSTED_INTR_WAKEUP_VECTOR;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

	/* We should not block the vCPU if an interrupt is posted for it. */
	if (pi_test_on(pi_desc) == 1)
		__pi_post_block(vcpu);

	local_irq_enable();
	return (vcpu->pre_pcpu == -1);
}

static int vmx_pre_block(struct kvm_vcpu *vcpu)
{
	if (pi_pre_block(vcpu))
		return 1;

	if (kvm_lapic_hv_timer_in_use(vcpu))
		kvm_lapic_switch_to_sw_timer(vcpu);

	return 0;
}

static void pi_post_block(struct kvm_vcpu *vcpu)
{
	if (vcpu->pre_pcpu == -1)
		return;

	WARN_ON(irqs_disabled());
	local_irq_disable();
	__pi_post_block(vcpu);
	local_irq_enable();
}

static void vmx_post_block(struct kvm_vcpu *vcpu)
{
	if (kvm_x86_ops->set_hv_timer)
		kvm_lapic_switch_to_hv_timer(vcpu);

	pi_post_block(vcpu);
}

/*
 * vmx_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
			      uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu;
	struct vcpu_data vcpu_info;
	int idx, ret = 0;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP) ||
	    !kvm_vcpu_apicv_active(kvm->vcpus[0]))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;
		/*
		 * VT-d PI cannot support posting multicast/broadcast
		 * interrupts to a vCPU, so we still use interrupt remapping
		 * for those kinds of interrupts.
		 *
		 * For lowest-priority interrupts, we only support
		 * those with a single CPU as the destination, e.g. user
		 * configures the interrupts via /proc/irq or uses
		 * irqbalance to make the interrupts single-CPU.
		 *
		 * We will support full lowest-priority interrupts later.
		 */

		kvm_set_msi_irq(kvm, e, &irq);
		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
			/*
			 * Make sure the IRTE is in remapped mode if
			 * we don't handle it in posted mode.
			 */
			ret = irq_set_vcpu_affinity(host_irq, NULL);
			if (ret < 0) {
				printk(KERN_INFO
				   "failed to fall back to remapped mode, irq: %u\n",
				   host_irq);
				goto out;
			}

			continue;
		}

		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
		vcpu_info.vector = irq.vector;

		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
				vcpu_info.vector, vcpu_info.pi_desc_addr, set);

		if (set)
			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
		else
			ret = irq_set_vcpu_affinity(host_irq, NULL);

		if (ret < 0) {
			printk(KERN_INFO "%s: failed to update PI IRTE\n",
					__func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}

static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
			FEATURE_CONTROL_LMCE;
	else
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~FEATURE_CONTROL_LMCE;
}

static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
{
	/* we need a nested vmexit to enter SMM, postpone if run is pending */
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return 0;
	return 1;
}

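/*
 * SMM and VMX operation are mutually exclusive from the guest's point of
 * view, so entering SMM forces a nested VM exit (if in guest mode) and
 * stashes the guest-mode/VMXON state for vmx_pre_leave_smm() to restore
 * on RSM.
 */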
Ladi Prosek0234bf82017-10-11 16:54:40 +020013085static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
13086{
Ladi Prosek72e9cbd2017-10-11 16:54:43 +020013087 struct vcpu_vmx *vmx = to_vmx(vcpu);
13088
13089 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
13090 if (vmx->nested.smm.guest_mode)
13091 nested_vmx_vmexit(vcpu, -1, 0, 0);
13092
13093 vmx->nested.smm.vmxon = vmx->nested.vmxon;
13094 vmx->nested.vmxon = false;
Wanpeng Licaa057a2018-03-12 04:53:03 -070013095 vmx_clear_hlt(vcpu);
Ladi Prosek0234bf82017-10-11 16:54:40 +020013096 return 0;
13097}
13098
static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int ret;

	if (vmx->nested.smm.vmxon) {
		vmx->nested.vmxon = true;
		vmx->nested.smm.vmxon = false;
	}

	if (vmx->nested.smm.guest_mode) {
		vcpu->arch.hflags &= ~HF_SMM_MASK;
		ret = enter_vmx_non_root_mode(vcpu);
		vcpu->arch.hflags |= HF_SMM_MASK;
		if (ret)
			return ret;

		vmx->nested.smm.guest_mode = false;
	}
	return 0;
}

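/* Requesting an SMI window is a no-op on VMX. */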
static int enable_smi_window(struct kvm_vcpu *vcpu)
{
	return 0;
}

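/*
 * The VT-x implementations of the arch-independent x86 KVM callbacks,
 * registered with the generic KVM layer by vmx_init() below.
 */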
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,
	.has_emulated_msr = vmx_has_emulated_msr,

	.vm_init = vmx_vm_init,
	.vm_alloc = vmx_vm_alloc,
	.vm_free = vmx_vm_free,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.update_bp_intercept = update_exception_bitmap,
	.get_msr_feature = vmx_get_msr_feature,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
	.decache_cr3 = vmx_decache_cr3,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.get_dr6 = vmx_get_dr6,
	.set_dr6 = vmx_set_dr6,
	.set_dr7 = vmx_set_dr7,
	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.cancel_injection = vmx_cancel_injection,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
	.get_enable_apicv = vmx_get_enable_apicv,
	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = vmx_load_eoi_exitmap,
	.apicv_post_state_restore = vmx_apicv_post_state_restore,
	.hwapic_irr_update = vmx_hwapic_irr_update,
	.hwapic_isr_update = vmx_hwapic_isr_update,
	.sync_pir_to_irr = vmx_sync_pir_to_irr,
	.deliver_posted_interrupt = vmx_deliver_posted_interrupt,

	.set_tss_addr = vmx_set_tss_addr,
	.set_identity_map_addr = vmx_set_identity_map_addr,
	.get_tdp_level = get_ept_level,
	.get_mt_mask = vmx_get_mt_mask,

	.get_exit_info = vmx_get_exit_info,

	.get_lpage_level = vmx_get_lpage_level,

	.cpuid_update = vmx_cpuid_update,

	.rdtscp_supported = vmx_rdtscp_supported,
	.invpcid_supported = vmx_invpcid_supported,

	.set_supported_cpuid = vmx_set_supported_cpuid,

	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
	.write_tsc_offset = vmx_write_tsc_offset,

	.set_tdp_cr3 = vmx_set_cr3,

	.check_intercept = vmx_check_intercept,
	.handle_external_intr = vmx_handle_external_intr,
	.mpx_supported = vmx_mpx_supported,
	.xsaves_supported = vmx_xsaves_supported,
	.umip_emulated = vmx_umip_emulated,

	.check_nested_events = vmx_check_nested_events,

	.sched_in = vmx_sched_in,

	.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
	.flush_log_dirty = vmx_flush_log_dirty,
	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
	.write_log_dirty = vmx_write_pml_buffer,

	.pre_block = vmx_pre_block,
	.post_block = vmx_post_block,

	.pmu_ops = &intel_pmu_ops,

	.update_pi_irte = vmx_update_pi_irte,

#ifdef CONFIG_X86_64
	.set_hv_timer = vmx_set_hv_timer,
	.cancel_hv_timer = vmx_cancel_hv_timer,
#endif

	.setup_mce = vmx_setup_mce,

	.smi_allowed = vmx_smi_allowed,
	.pre_enter_smm = vmx_pre_enter_smm,
	.pre_leave_smm = vmx_pre_leave_smm,
	.enable_smi_window = enable_smi_window,
};

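/*
 * Free the L1D flush pages (if any) and reset the reported L1TF
 * mitigation state to auto, as if the module had never loaded.
 */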
static void vmx_cleanup_l1d_flush(void)
{
	if (vmx_l1d_flush_pages) {
		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
		vmx_l1d_flush_pages = NULL;
	}
	/* Restore state so sysfs ignores VMX */
	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}

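/*
 * Module teardown: unhook the crash-time VMCLEAR callback, shut down
 * the generic KVM layer, undo any Hyper-V enlightened VMCS state and
 * release the L1D flush pages.
 */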
static void vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	synchronize_rcu();
#endif

	kvm_exit();

#if IS_ENABLED(CONFIG_HYPERV)
	if (static_branch_unlikely(&enable_evmcs)) {
		int cpu;
		struct hv_vp_assist_page *vp_ap;
		/*
		 * Reset everything to support using non-enlightened VMCS
		 * access later (e.g. when we reload the module with
		 * enlightened_vmcs=0).
		 */
		for_each_online_cpu(cpu) {
			vp_ap = hv_get_vp_assist_page(cpu);

			if (!vp_ap)
				continue;

			vp_ap->current_nested_vmcs = 0;
			vp_ap->enlighten_vmentry = 0;
		}

		static_branch_disable(&enable_evmcs);
	}
#endif
	vmx_cleanup_l1d_flush();
}
module_exit(vmx_exit);

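/*
 * Module init: optionally enable Hyper-V enlightened VMCS, register
 * with the generic KVM layer and set up the L1TF mitigation.
 */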
static int __init vmx_init(void)
{
	int r;

#if IS_ENABLED(CONFIG_HYPERV)
	/*
	 * Enlightened VMCS usage must be recommended by the hypervisor and
	 * the host needs to support eVMCS v1 or above. eVMCS support can
	 * also be disabled with the module parameter.
	 */
	if (enlightened_vmcs &&
	    ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
	    KVM_EVMCS_VERSION) {
		int cpu;

		/* Check that we have assist pages on all online CPUs */
		for_each_online_cpu(cpu) {
			if (!hv_get_vp_assist_page(cpu)) {
				enlightened_vmcs = false;
				break;
			}
		}

		if (enlightened_vmcs) {
			pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
			static_branch_enable(&enable_evmcs);
		}
	} else {
		enlightened_vmcs = false;
	}
#endif

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		return r;

	/*
	 * Must be called after kvm_init() so enable_ept is properly set
	 * up. Hand in the mitigation parameter value that was stored by
	 * the pre-module-init parser. If no parameter was given, it will
	 * contain 'auto', which is turned into the default 'cond'
	 * mitigation mode.
	 */
	if (boot_cpu_has(X86_BUG_L1TF)) {
		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
		if (r) {
			vmx_exit();
			return r;
		}
	}

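	/*
	 * Hook up the crash path so that loaded VMCSs are VMCLEARed
	 * before a kexec crash kernel takes over.
	 */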
#ifdef CONFIG_KEXEC_CORE
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
#endif
	vmx_check_vmcs12_offsets();

	return 0;
}
module_init(vmx_init);