/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
	____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int __read_mostly bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, S_IRUGO);

static int __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static int __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static int __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static int __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static int __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);

static int __read_mostly yield_on_hlt = 1;
module_param(yield_on_hlt, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and act as hypervisors for their own guests. If nested=0, guests may
 * not use VMX instructions.
 */
static int __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);

#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
#define KVM_GUEST_CR0_MASK						\
	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST				\
	(X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON						\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT)

#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. Testing shows this time is usually smaller than 128
 *             cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured on a counter that runs at the same rate as the TSC;
 * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP    128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);

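/*
 * Sketch of the hardware's PLE decision as described above (an illustrative
 * approximation, not code the driver runs):
 *
 *	on each PAUSE executed in guest mode:
 *		if (tsc_now - tsc_of_previous_pause > ple_gap)
 *			window_start = tsc_now;		(a new spin loop)
 *		else if (tsc_now - window_start > ple_window)
 *			take an EXIT_REASON_PAUSE_INSTRUCTION VM exit;
 */
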
#define NR_AUTOLOAD_MSRS 1
#define VMCS02_POOL_SIZE 1

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

/*
 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
 * loaded on this CPU (so we can clear them if the CPU goes down).
 */
struct loaded_vmcs {
	struct vmcs *vmcs;
	int cpu;
	int launched;
	struct list_head loaded_vmcss_on_cpu_link;
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

/*
 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
 * More than one of these structures may exist, if L1 runs multiple L2 guests.
 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
 * underlying hardware which will be used to run L2.
 * This structure is packed to ensure that its layout is identical across
 * machines (necessary for live migration).
 * If there are changes in this struct, VMCS12_REVISION must be changed.
 */
typedef u64 natural_width;
struct __packed vmcs12 {
	/* According to the Intel spec, a VMCS region must start with the
	 * following two fields. Then follow implementation-specific data.
	 */
	u32 revision_id;
	u32 abort;

	u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
	u32 padding[7]; /* room for future expansion */

	u64 io_bitmap_a;
	u64 io_bitmap_b;
	u64 msr_bitmap;
	u64 vm_exit_msr_store_addr;
	u64 vm_exit_msr_load_addr;
	u64 vm_entry_msr_load_addr;
	u64 tsc_offset;
	u64 virtual_apic_page_addr;
	u64 apic_access_addr;
	u64 ept_pointer;
	u64 guest_physical_address;
	u64 vmcs_link_pointer;
	u64 guest_ia32_debugctl;
	u64 guest_ia32_pat;
	u64 guest_ia32_efer;
	u64 guest_ia32_perf_global_ctrl;
	u64 guest_pdptr0;
	u64 guest_pdptr1;
	u64 guest_pdptr2;
	u64 guest_pdptr3;
	u64 host_ia32_pat;
	u64 host_ia32_efer;
	u64 host_ia32_perf_global_ctrl;
	u64 padding64[8]; /* room for future expansion */
	/*
	 * To allow migration of L1 (complete with its L2 guests) between
	 * machines of different natural widths (32 or 64 bit), we cannot have
	 * unsigned long fields with no explicit size. We use u64 (aliased
	 * natural_width) instead. Luckily, x86 is little-endian.
	 */
	natural_width cr0_guest_host_mask;
	natural_width cr4_guest_host_mask;
	natural_width cr0_read_shadow;
	natural_width cr4_read_shadow;
	natural_width cr3_target_value0;
	natural_width cr3_target_value1;
	natural_width cr3_target_value2;
	natural_width cr3_target_value3;
	natural_width exit_qualification;
	natural_width guest_linear_address;
	natural_width guest_cr0;
	natural_width guest_cr3;
	natural_width guest_cr4;
	natural_width guest_es_base;
	natural_width guest_cs_base;
	natural_width guest_ss_base;
	natural_width guest_ds_base;
	natural_width guest_fs_base;
	natural_width guest_gs_base;
	natural_width guest_ldtr_base;
	natural_width guest_tr_base;
	natural_width guest_gdtr_base;
	natural_width guest_idtr_base;
	natural_width guest_dr7;
	natural_width guest_rsp;
	natural_width guest_rip;
	natural_width guest_rflags;
	natural_width guest_pending_dbg_exceptions;
	natural_width guest_sysenter_esp;
	natural_width guest_sysenter_eip;
	natural_width host_cr0;
	natural_width host_cr3;
	natural_width host_cr4;
	natural_width host_fs_base;
	natural_width host_gs_base;
	natural_width host_tr_base;
	natural_width host_gdtr_base;
	natural_width host_idtr_base;
	natural_width host_ia32_sysenter_esp;
	natural_width host_ia32_sysenter_eip;
	natural_width host_rsp;
	natural_width host_rip;
	natural_width paddingl[8]; /* room for future expansion */
	u32 pin_based_vm_exec_control;
	u32 cpu_based_vm_exec_control;
	u32 exception_bitmap;
	u32 page_fault_error_code_mask;
	u32 page_fault_error_code_match;
	u32 cr3_target_count;
	u32 vm_exit_controls;
	u32 vm_exit_msr_store_count;
	u32 vm_exit_msr_load_count;
	u32 vm_entry_controls;
	u32 vm_entry_msr_load_count;
	u32 vm_entry_intr_info_field;
	u32 vm_entry_exception_error_code;
	u32 vm_entry_instruction_len;
	u32 tpr_threshold;
	u32 secondary_vm_exec_control;
	u32 vm_instruction_error;
	u32 vm_exit_reason;
	u32 vm_exit_intr_info;
	u32 vm_exit_intr_error_code;
	u32 idt_vectoring_info_field;
	u32 idt_vectoring_error_code;
	u32 vm_exit_instruction_len;
	u32 vmx_instruction_info;
	u32 guest_es_limit;
	u32 guest_cs_limit;
	u32 guest_ss_limit;
	u32 guest_ds_limit;
	u32 guest_fs_limit;
	u32 guest_gs_limit;
	u32 guest_ldtr_limit;
	u32 guest_tr_limit;
	u32 guest_gdtr_limit;
	u32 guest_idtr_limit;
	u32 guest_es_ar_bytes;
	u32 guest_cs_ar_bytes;
	u32 guest_ss_ar_bytes;
	u32 guest_ds_ar_bytes;
	u32 guest_fs_ar_bytes;
	u32 guest_gs_ar_bytes;
	u32 guest_ldtr_ar_bytes;
	u32 guest_tr_ar_bytes;
	u32 guest_interruptibility_info;
	u32 guest_activity_state;
	u32 guest_sysenter_cs;
	u32 host_ia32_sysenter_cs;
	u32 padding32[8]; /* room for future expansion */
	u16 virtual_processor_id;
	u16 guest_es_selector;
	u16 guest_cs_selector;
	u16 guest_ss_selector;
	u16 guest_ds_selector;
	u16 guest_fs_selector;
	u16 guest_gs_selector;
	u16 guest_ldtr_selector;
	u16 guest_tr_selector;
	u16 host_es_selector;
	u16 host_cs_selector;
	u16 host_ss_selector;
	u16 host_ds_selector;
	u16 host_fs_selector;
	u16 host_gs_selector;
	u16 host_tr_selector;
};

/*
 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
 */
#define VMCS12_REVISION 0x11e57ed0

/*
 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
 * the current implementation, 4K are reserved to avoid future complications.
 */
#define VMCS12_SIZE 0x1000

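/*
 * Illustrative sketch of what an L1 guest is expected to do with the above
 * (hypothetical guest-side code, not part of this driver): allocate a 4K
 * region, stamp it with the revision id read from MSR_IA32_VMX_BASIC (which
 * we report as VMCS12_REVISION), and hand it to VMPTRLD:
 *
 *	u8 *vmcs = alloc_page_4k();
 *	memset(vmcs, 0, VMCS12_SIZE);
 *	*(u32 *)vmcs = rdmsr(MSR_IA32_VMX_BASIC) & 0x7fffffff;
 *	vmptrld(__pa(vmcs));
 */
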
/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
	struct list_head list;
	gpa_t vmptr;
	struct loaded_vmcs vmcs02;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/* The host-usable pointer to the above */
	struct page *current_vmcs12_page;
	struct vmcs12 *current_vmcs12;

	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
	struct list_head vmcs02_pool;
	int vmcs02_num;
	u64 vmcs01_tsc_offset;
	/*
	 * Guest pages referred to in vmcs02 with host-physical pointers, so
	 * we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	unsigned long         host_rsp;
	u8                    fail;
	u8                    cpl;
	bool                  nmi_known_unmasked;
	u32                   exit_intr_info;
	u32                   idt_vectoring_info;
	ulong                 rflags;
	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs    vmcs01;
	struct loaded_vmcs   *loaded_vmcs;
	bool                  __launched; /* temporary, used in vmx_vcpu_run */
	struct msr_autoload {
		unsigned nr;
		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
	} msr_autoload;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;

	bool rdtscp_enabled;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name)	[number] = VMCS12_OFFSET(name)
#define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
				[number##_HIGH] = VMCS12_OFFSET(name)+4

static unsigned short vmcs_field_to_offset_table[] = {
	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
	FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
	FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
	FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
	FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
	FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
	FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
	FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
	FIELD(HOST_ES_SELECTOR, host_es_selector),
	FIELD(HOST_CS_SELECTOR, host_cs_selector),
	FIELD(HOST_SS_SELECTOR, host_ss_selector),
	FIELD(HOST_DS_SELECTOR, host_ds_selector),
	FIELD(HOST_FS_SELECTOR, host_fs_selector),
	FIELD(HOST_GS_SELECTOR, host_gs_selector),
	FIELD(HOST_TR_SELECTOR, host_tr_selector),
	FIELD64(IO_BITMAP_A, io_bitmap_a),
	FIELD64(IO_BITMAP_B, io_bitmap_b),
	FIELD64(MSR_BITMAP, msr_bitmap),
	FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
	FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
	FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
	FIELD64(TSC_OFFSET, tsc_offset),
	FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
	FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
	FIELD64(EPT_POINTER, ept_pointer),
	FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
	FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
	FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
	FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
	FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
	FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
	FIELD64(GUEST_PDPTR0, guest_pdptr0),
	FIELD64(GUEST_PDPTR1, guest_pdptr1),
	FIELD64(GUEST_PDPTR2, guest_pdptr2),
	FIELD64(GUEST_PDPTR3, guest_pdptr3),
	FIELD64(HOST_IA32_PAT, host_ia32_pat),
	FIELD64(HOST_IA32_EFER, host_ia32_efer),
	FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
	FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
	FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
	FIELD(EXCEPTION_BITMAP, exception_bitmap),
	FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
	FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
	FIELD(CR3_TARGET_COUNT, cr3_target_count),
	FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
	FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
	FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
	FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
	FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
	FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
	FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
	FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
	FIELD(TPR_THRESHOLD, tpr_threshold),
	FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
	FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
	FIELD(VM_EXIT_REASON, vm_exit_reason),
	FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
	FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
	FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
	FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
	FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
	FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
	FIELD(GUEST_ES_LIMIT, guest_es_limit),
	FIELD(GUEST_CS_LIMIT, guest_cs_limit),
	FIELD(GUEST_SS_LIMIT, guest_ss_limit),
	FIELD(GUEST_DS_LIMIT, guest_ds_limit),
	FIELD(GUEST_FS_LIMIT, guest_fs_limit),
	FIELD(GUEST_GS_LIMIT, guest_gs_limit),
	FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
	FIELD(GUEST_TR_LIMIT, guest_tr_limit),
	FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
	FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
	FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
	FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
	FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
	FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
	FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
	FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
	FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
	FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
	FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
	FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
	FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
	FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
	FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
	FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
	FIELD(CR0_READ_SHADOW, cr0_read_shadow),
	FIELD(CR4_READ_SHADOW, cr4_read_shadow),
	FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
	FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
	FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
	FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
	FIELD(EXIT_QUALIFICATION, exit_qualification),
	FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
	FIELD(GUEST_CR0, guest_cr0),
	FIELD(GUEST_CR3, guest_cr3),
	FIELD(GUEST_CR4, guest_cr4),
	FIELD(GUEST_ES_BASE, guest_es_base),
	FIELD(GUEST_CS_BASE, guest_cs_base),
	FIELD(GUEST_SS_BASE, guest_ss_base),
	FIELD(GUEST_DS_BASE, guest_ds_base),
	FIELD(GUEST_FS_BASE, guest_fs_base),
	FIELD(GUEST_GS_BASE, guest_gs_base),
	FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
	FIELD(GUEST_TR_BASE, guest_tr_base),
	FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
	FIELD(GUEST_IDTR_BASE, guest_idtr_base),
	FIELD(GUEST_DR7, guest_dr7),
	FIELD(GUEST_RSP, guest_rsp),
	FIELD(GUEST_RIP, guest_rip),
	FIELD(GUEST_RFLAGS, guest_rflags),
	FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
	FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
	FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
	FIELD(HOST_CR0, host_cr0),
	FIELD(HOST_CR3, host_cr3),
	FIELD(HOST_CR4, host_cr4),
	FIELD(HOST_FS_BASE, host_fs_base),
	FIELD(HOST_GS_BASE, host_gs_base),
	FIELD(HOST_TR_BASE, host_tr_base),
	FIELD(HOST_GDTR_BASE, host_gdtr_base),
	FIELD(HOST_IDTR_BASE, host_idtr_base),
	FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
	FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
	FIELD(HOST_RSP, host_rsp),
	FIELD(HOST_RIP, host_rip),
};
static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);

static inline short vmcs_field_to_offset(unsigned long field)
{
	if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
		return -1;
	return vmcs_field_to_offset_table[field];
}

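/*
 * Illustrative sketch (not in the original source): the table above turns a
 * VMCS field encoding into a byte offset within struct vmcs12, so an emulated
 * VMREAD boils down to something like:
 *
 *	short off = vmcs_field_to_offset(GUEST_RIP);
 *	if (off < 0)
 *		...unsupported field, fail the instruction...
 *	u64 rip = *(u64 *)((char *)get_vmcs12(vcpu) + off);
 */
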
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.current_vmcs12;
}

static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
	struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return NULL;
	}
	return page;
}

static void nested_release_page(struct page *page)
{
	kvm_release_page_dirty(page);
}

static void nested_release_page_clean(struct page *page)
{
	kvm_release_page_clean(page);
}

static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is
 * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded
 * on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;

static bool cpu_has_load_ia32_efer;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

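/*
 * For illustration (macro expansion only, no additional code): the first
 * table entry above expands to
 *
 *	[VCPU_SREG_CS] = {
 *		.selector = GUEST_CS_SELECTOR,
 *		.base     = GUEST_CS_BASE,
 *		.limit    = GUEST_CS_LIMIT,
 *		.ar_bytes = GUEST_CS_AR_BYTES,
 *	},
 *
 * i.e. each entry maps a kvm segment register to its four VMCS fields.
 */
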
static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline bool is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline bool is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline bool vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return vmx_capability.ept & VMX_EPTP_UC_BIT;
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_invept_individual_addr(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled && irqchip_in_kernel(kvm);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		      /* CF==1 or ZF==1 --> rc = -1 */
		      "; ja 1f ; ud2 ; 1:"
		      : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
		      /* CF==1 or ZF==1 --> rc = -1 */
		      "; ja 1f ; ud2 ; 1:\n"
		      : : "a" (&operand), "c" (ext) : "cc", "memory");
}

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
		       vmcs, phys_addr);
}

static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
	loaded_vmcs_init(loaded_vmcs);
}

static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	if (loaded_vmcs->cpu != -1)
		smp_call_function_single(
			loaded_vmcs->cpu, __loaded_vmcs_clear, loaded_vmcs, 1);
}

static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(struct vcpu_vmx *vmx)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vmx);
	else
		vpid_sync_vcpu_global();
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_individual_addr())
			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
					eptp, gpa);
		else
			ept_sync_context(eptp);
	}
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

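/*
 * Illustrative sketch (not in the original source): all reads of the current
 * VMCS funnel through these wrappers, sized to the field being read, e.g.:
 *
 *	u32 reason = vmcs_read32(VM_EXIT_REASON);
 *	ulong cr3  = vmcs_readl(GUEST_CR3);
 *	u64 eptp   = vmcs_read64(EPT_POINTER);
 *
 * On 32-bit hosts a 64-bit field takes two VMREADs, as seen above.
 */
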
static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		      : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

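/*
 * Usage sketch for the read-modify-write helpers above (illustrative):
 * toggling a single control bit without disturbing its neighbours, e.g.
 *
 *	vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
 *	vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
 *
 * exactly as the MSR autoload code later in this file does.
 */
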
static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
		vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

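/*
 * Illustrative sketch of the caching effect (not in the original source):
 *
 *	u32 ar1 = vmx_read_guest_seg_ar(vmx, VCPU_SREG_CS); // VMREAD
 *	u32 ar2 = vmx_read_guest_seg_ar(vmx, VCPU_SREG_CS); // cached copy
 *
 * Only the first call touches the VMCS; the second is answered from
 * segment_cache, whose bitmask holds one valid bit per (segment, field)
 * pair.
 */
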
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	if (vcpu->fpu_active)
		eb &= ~(1u << NM_VECTOR);
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
		return;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
		vmcs_write64(GUEST_IA32_EFER, guest_val);
		vmcs_write64(HOST_IA32_EFER, host_val);
		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
		return;
	}

	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr) {
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}

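/*
 * Usage sketch (illustrative): update_transition_efer() below relies on this
 * pair when NX differs between guest and host under EPT:
 *
 *	add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
 *
 * With hardware support for the "load IA32_EFER" controls the values go
 * straight into the VMCS; otherwise they land in the autoload arrays that
 * the CPU walks on every VM entry/exit.
 */
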
static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *descs;

	descs = (void *)gdt->address;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}

static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer;
	u64 ignore_bits;

	guest_efer = vmx->vcpu.arch.efer;

	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	vmx->guest_msrs[efer_offset].data = guest_efer;
	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

	clear_atomic_switch_msr(vmx, MSR_EFER);
	/* On ept, can't emulate nx, and must switch nx atomically */
	if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
		guest_efer = vmx->vcpu.arch.efer;
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
		return false;
	}

	return true;
}

static unsigned long segment_base(u16 selector)
{
	struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (!(selector & ~3))
		return 0;

	table_base = gdt->address;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~3))
			return 0;

		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = get_desc_base(d);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	if (current_thread_info()->status & TS_USEDFPU)
		clts();
	load_gdt(&__get_cpu_var(host_gdt));
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));

	if (!vmm_exclusive)
		kvm_cpu_vmxon(phys_addr);
	else if (vmx->loaded_vmcs->cpu != cpu)
		loaded_vmcs_clear(vmx->loaded_vmcs);

	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

	if (vmx->loaded_vmcs->cpu != cpu) {
		struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		local_irq_disable();
		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		local_irq_enable();

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		vmcs_writel(HOST_GDTR_BASE, gdt->address);     /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
		vmx->loaded_vmcs->cpu = cpu;
	}
}

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
	if (!vmm_exclusive) {
		__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
		vcpu->cpu = -1;
		kvm_cpu_vmxoff();
	}
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	ulong cr0;

	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	cr0 = vmcs_readl(GUEST_CR0);
	cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
	cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
	vmcs_writel(GUEST_CR0, cr0);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}

static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

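/*
 * Illustrative sketch (not from the original source): a worked example
 * of the masking above, with assumed values. Suppose L1 owns only
 * CR0.TS for its L2 guest:
 *
 *	cr0_guest_host_mask = X86_CR0_TS
 *	guest_cr0           = X86_CR0_PE | X86_CR0_PG		(TS clear)
 *	cr0_read_shadow     = X86_CR0_PE | X86_CR0_PG | X86_CR0_TS
 *
 * then nested_read_cr0() returns
 *
 *	(guest_cr0 & ~TS) | (shadow & TS) = PE | PG | TS
 *
 * i.e. for every bit in the mask L2 sees what L1 put in the shadow,
 * and for every other bit it sees the real value used to run it.
 */
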
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	vmx_decache_cr0_guest_bits(vcpu);
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = 0;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
	vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= KVM_X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= KVM_X86_SHADOW_INT_MOV_SS;

	return ret & mask;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & KVM_X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	else if (mask & KVM_X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if (interruptibility != interruptibility_old)
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}

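/*
 * Illustrative sketch (not from the original source): the two helpers
 * above translate between the architectural interruptibility field in
 * the VMCS and KVM's generic shadow flags. Conceptually:
 *
 *	VMCS GUEST_INTR_STATE_STI    <->  KVM_X86_SHADOW_INT_STI
 *	VMCS GUEST_INTR_STATE_MOV_SS <->  KVM_X86_SHADOW_INT_MOV_SS
 *
 * so a round trip such as
 *
 *	u32 shadow = vmx_get_interrupt_shadow(vcpu,
 *			KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS);
 *	vmx_set_interrupt_shadow(vcpu, shadow);
 *
 * leaves the interruptibility state unchanged, which is what the
 * emulator relies on when it saves and restores the shadow around an
 * emulated instruction.
 */
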
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}

static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure that we clear the HLT state in the VMCS.  We don't need to
	 * explicitly skip the instruction because if the HLT state is set,
	 * then the instruction is already executing and RIP has already been
	 * advanced.
	 */
	if (!yield_on_hlt &&
	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
	vmx_clear_hlt(vcpu);
}

static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;
	unsigned long *msr_bitmap;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && vmx->rdtscp_enabled)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap()) {
		if (is_long_mode(&vmx->vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;

		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
	}
}

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

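/*
 * Illustrative sketch (not from the original source): the offsetting
 * identity above also explains the helpers that follow. With assumed
 * values,
 *
 *	host_tsc   = 1000000
 *	tsc_offset = -400000	(guest was created "later")
 *	guest_tsc  = 1000000 + -400000 = 600000
 *
 * and vmx_adjust_tsc_offset(vcpu, 500) simply rewrites TSC_OFFSET to
 * -399500, shifting every future guest reading by the same amount
 * without touching the host counter.
 */
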
/*
 * Empty call-back. Needs to be implemented when VMX enables the SET_TSC_KHZ
 * ioctl. In this case the call-back should update internal vmx state to make
 * the changes effective.
 */
static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
	/* Nothing to do here */
}

/*
 * writes 'offset' into guest's timestamp counter offset register
 */
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	vmcs_write64(TSC_OFFSET, offset);
}

static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
	u64 offset = vmcs_read64(TSC_OFFSET);
	vmcs_write64(TSC_OFFSET, offset + adjustment);
}

static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
	return target_tsc - native_read_tsc();
}

static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
}

/*
 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
 * all guests if the "nested" module option is off, and can also be disabled
 * for a single guest by disabling its VMX cpuid bit.
 */
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
	return nested && guest_cpuid_has_vmx(vcpu);
}

/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 * TODO: allow these variables to be modified (downgraded) by module options
 * or other means.
 */
static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
static __init void nested_vmx_setup_ctls_msrs(void)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs02, will turn these bits off - and
	 * nested_vmx_exit_handled() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	/*
	 * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
	 * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
	 */
	nested_vmx_pinbased_ctls_low = 0x16;
	nested_vmx_pinbased_ctls_high = 0x16 |
		PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS;

	/* exit controls */
	nested_vmx_exit_ctls_low = 0;
#ifdef CONFIG_X86_64
	nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
#else
	nested_vmx_exit_ctls_high = 0;
#endif

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
	      nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
	nested_vmx_entry_ctls_low = 0;
	nested_vmx_entry_ctls_high &=
		VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
	      nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
	nested_vmx_procbased_ctls_low = 0;
	nested_vmx_procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;

	/* secondary cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
	      nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
	nested_vmx_secondary_ctls_low = 0;
	nested_vmx_secondary_ctls_high &=
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	/*
	 * Every bit that is 1 in 'low' must be 1 in 'control', and every
	 * bit that is 0 in 'high' must be 0 in 'control'.
	 */
	return ((control & high) | low) == control;
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

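/*
 * Illustrative sketch (not from the original source): a worked example
 * of vmx_control_verify() with assumed 8-bit values.
 *
 *	low  = 0x03	(bits 0-1 are required)
 *	high = 0x0f	(bits 0-3 are allowed)
 *
 *	control = 0x07: ((0x07 & 0x0f) | 0x03) == 0x07	-> valid
 *	control = 0x05: ((0x05 & 0x0f) | 0x03) == 0x07	-> rejected,
 *			bit 1 is required but clear
 *	control = 0x13: ((0x13 & 0x0f) | 0x03) == 0x03	-> rejected,
 *			bit 4 is set but not allowed
 *
 * vmx_control_msr(low, high) then packs exactly this pair into the
 * 64-bit format the VMX capability MSRs use: allowed-0 settings in the
 * low half, allowed-1 settings in the high half.
 */
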
/*
 * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
 * also let it use VMX-specific MSRs.
 * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
 * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
 * like all other MSRs).
 */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
	    msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
		/*
		 * According to the spec, processors which do not support VMX
		 * should throw a #GP(0) when VMX capability MSRs are read.
		 */
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return 1;
	}

	switch (msr_index) {
	case MSR_IA32_FEATURE_CONTROL:
		*pdata = 0;
		break;
	case MSR_IA32_VMX_BASIC:
		/*
		 * This MSR reports some information about VMX support. We
		 * should return information about the VMX we emulate for the
		 * guest, and the VMCS structure we give it - not about the
		 * VMX support of the underlying hardware.
		 */
		*pdata = VMCS12_REVISION |
			   ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
			   (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
					 nested_vmx_pinbased_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
					 nested_vmx_procbased_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
					 nested_vmx_exit_ctls_high);
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
					 nested_vmx_entry_ctls_high);
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = 0;
		break;
	/*
	 * These MSRs specify bits which the guest must keep fixed (on or off)
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON	(X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON	X86_CR4_VMXE
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = VMXON_CR0_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = VMXON_CR4_ALWAYSON;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = -1ULL;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = 0x1f;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
					 nested_vmx_secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		/* Currently, no nested ept or nested vpid */
		*pdata = 0;
		break;
	default:
		return 0;
	}

	return 1;
}

static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	if (!nested_vmx_allowed(vcpu))
		return 0;

	if (msr_index == MSR_IA32_FEATURE_CONTROL)
		/* TODO: the right thing. */
		return 1;
	/*
	 * No need to treat VMX capability MSRs specially: If we don't handle
	 * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
	 */
	return 0;
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct shared_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	case MSR_IA32_TSC:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_TSC_AUX:
		if (!to_vmx(vcpu)->rdtscp_enabled)
			return 1;
		/* Otherwise falls through */
	default:
		vmx_load_host_state(to_vmx(vcpu));
		if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
			return 0;
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			vmx_load_host_state(to_vmx(vcpu));
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	int ret = 0;

	switch (msr_index) {
	case MSR_EFER:
		vmx_load_host_state(vmx);
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmx_segment_cache_clear(vmx);
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TSC:
		kvm_write_tsc(vcpu, data);
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
	case MSR_TSC_AUX:
		if (!vmx->rdtscp_enabled)
			return 1;
		/* Check reserved bit, higher 32 bits should be zero */
		if ((data >> 32) != 0)
			return 1;
		/* Otherwise falls through */
	default:
		if (vmx_set_vmx_msr(vcpu, msr_index, data))
			break;
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			vmx_load_host_state(vmx);
			msr->data = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
	else
		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);

	update_exception_bitmap(vcpu);
}

static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched w/ TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& tboot_enabled())
			return 1;
		/* launched w/o TXT and VMX only enabled w/ TXT */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& !tboot_enabled()) {
			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
				"activate TXT before enabling KVM\n");
			return 1;
		}
		/* launched w/o TXT and VMX disabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& !tboot_enabled())
			return 1;
	}

	return 0;
}

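/*
 * Illustrative sketch (not from the original source): the checks above
 * as a truth table, assuming the FEATURE_CONTROL lock bit is set.
 * "in" is VMXON_ENABLED_INSIDE_SMX, "out" is VMXON_ENABLED_OUTSIDE_SMX.
 *
 *	tboot	in	out	result
 *	yes	0	-	disabled (BIOS left VMX off under TXT)
 *	no	1	0	disabled + warning (VMX usable only w/ TXT)
 *	no	0	0	disabled
 *	otherwise		not disabled
 *
 * If the lock bit is clear, hardware_enable() below sets and locks the
 * required bits itself, so the function reports "not disabled".
 */
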
static void kvm_cpu_vmxon(u64 addr)
{
	asm volatile (ASM_VMX_VMXON_RAX
			: : "a"(&addr), "m"(addr)
			: "memory", "cc");
}

static int hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old, test_bits;

	if (read_cr4() & X86_CR4_VMXE)
		return -EBUSY;

	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);

	test_bits = FEATURE_CONTROL_LOCKED;
	test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	if (tboot_enabled())
		test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;

	if ((old & test_bits) != test_bits) {
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
	}
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */

	if (vmm_exclusive) {
		kvm_cpu_vmxon(phys_addr);
		ept_sync_global();
	}

	store_gdt(&__get_cpu_var(host_gdt));

	return 0;
}

static void vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link)
		__loaded_vmcs_clear(v);
}

/*
 * Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
}

static void hardware_disable(void *garbage)
{
	if (vmm_exclusive) {
		vmclear_local_loaded_vmcss();
		kvm_cpu_vmxoff();
	}
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

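/*
 * Illustrative sketch (not from the original source): a worked example
 * of adjust_vmx_controls() with assumed 8-bit MSR halves.
 *
 *	ctl_min = 0x01, ctl_opt = 0x06	-> ctl = 0x07
 *	vmx_msr_high = 0x0d		(bit 1 may not be 1)
 *	vmx_msr_low  = 0x01		(bit 0 must be 1)
 *
 *	ctl &= 0x0d	-> 0x05		(unsupported optional bit dropped)
 *	ctl |= 0x01	-> 0x05
 *
 * ctl_min & ~ctl == 0, so the call succeeds with *result = 0x05: every
 * required bit survived, and the unsupported optional bit was shed.
 * Had ctl_min contained bit 1, the function would return -EIO instead.
 */
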
static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);
	return vmx_msr_high & ctl;
}

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min =
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_MWAIT_EXITING |
	      CPU_BASED_MONITOR_EXITING |
	      CPU_BASED_INVLPG_EXITING;

	if (yield_on_hlt)
		min |= CPU_BASED_HLT_EXITING;

	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
			SECONDARY_EXEC_RDTSCP;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = 0;
	opt = VM_ENTRY_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl = _vmexit_control;
	vmcs_conf->vmentry_ctrl = _vmentry_control;

	cpu_has_load_ia32_efer =
		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
				VM_ENTRY_LOAD_IA32_EFER)
		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
				   VM_EXIT_LOAD_IA32_EFER);

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (!cpu_has_vmx_vpid())
		enable_vpid = 0;

	if (!cpu_has_vmx_ept() ||
	    !cpu_has_vmx_ept_4levels()) {
		enable_ept = 0;
		enable_unrestricted_guest = 0;
	}

	if (!cpu_has_vmx_unrestricted_guest())
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

	if (!cpu_has_vmx_ple())
		ple_gap = 0;

	if (nested)
		nested_vmx_setup_ctls_msrs();

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 0;

	vmx_segment_cache_clear(vmx);

	vmcs_write16(GUEST_TR_SELECTOR, vmx->rmode.tr.selector);
	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
	flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		return;

	fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);

	vmx_segment_cache_clear(vmx);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

Mike Dayd77c26f2007-10-08 09:02:08 -04002525static gva_t rmode_tss_base(struct kvm *kvm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002526{
Zhang Xiantaobfc6d222007-12-14 10:20:16 +08002527 if (!kvm->arch.tss_addr) {
Marcelo Tosattibc6678a2009-12-23 14:35:21 -02002528 struct kvm_memslots *slots;
2529 gfn_t base_gfn;
2530
Lai Jiangshan90d83dc2010-04-19 17:41:23 +08002531 slots = kvm_memslots(kvm);
Avi Kivityf495c6e2010-06-10 17:21:29 +03002532 base_gfn = slots->memslots[0].base_gfn +
Marcelo Tosatti46a26bf2009-12-23 14:35:16 -02002533 kvm->memslots->memslots[0].npages - 3;
Izik Eiduscbc94022007-10-25 00:29:55 +02002534 return base_gfn << PAGE_SHIFT;
2535 }
Zhang Xiantaobfc6d222007-12-14 10:20:16 +08002536 return kvm->arch.tss_addr;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002537}
2538
static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, save->base >> 4);
	vmcs_write32(sf->base, save->base & 0xffff0);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
	if (save->base & 0xf)
		printk_once(KERN_WARNING "kvm: segment base is not paragraph"
			    " aligned when entering protected mode (seg=%d)",
			    seg);
}

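/*
 * Switch the guest into vm86-based real-mode emulation: save the current
 * protected-mode segment state, point TR at the real-mode TSS and load
 * vm86-compatible segment and flag values. Unneeded for unrestricted
 * guests, which can run in real mode natively.
 */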
static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_unrestricted_guest)
		return;

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 1;

	/*
	 * Very old userspace does not call KVM_SET_TSS_ADDR before entering
	 * vcpu. Call it here with phys address pointing 16M below 4G.
	 */
	if (!vcpu->kvm->arch.tss_addr) {
		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
			     "called before entering vcpu\n");
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	}

	vmx_segment_cache_clear(vmx);

	vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_rflags = flags;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		goto continue_rmode;

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);

continue_rmode:
	kvm_mmu_reset_context(vcpu);
}

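/*
 * Propagate a new EFER value into the VMCS and the shared MSR state,
 * toggling the IA-32e mode VM-entry control according to EFER.LMA.
 */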
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	if (!msr)
		return;

	/*
	 * Force kernel_gs_base reloading before EFER changes, as control
	 * of this msr depends on is_long_mode().
	 */
	vmx_load_host_state(to_vmx(vcpu));
	vcpu->arch.efer = efer;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	vmx_segment_cache_clear(to_vmx(vcpu));

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
		       __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}
	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}

#endif

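/*
 * Flush the guest's TLB entries: the VPID-tagged linear mappings and,
 * when EPT is active, the combined mappings for the current EPT root.
 */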
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_context(to_vmx(vcpu));
	if (enable_ept) {
		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
			return;
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
	}
}

static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;

	vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
}

static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
{
	if (enable_ept && is_paging(vcpu))
		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
}

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;

	vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}

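/*
 * Copy the cached PDPTEs into the VMCS for a PAE guest running under EPT;
 * the hardware loads them from there on VM entry.
 */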
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_dirty))
		return;

	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
	}
}

static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
		vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
		vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
		vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
		vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
	}

	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
}

static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

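/*
 * Under EPT, CR3 accesses must cause VM exits only while the guest runs
 * unpaged; adjust the CR3 exiting controls and the CR4 shadow whenever
 * the guest toggles CR0.PG.
 */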
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
					unsigned long cr0,
					struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		vmx_decache_cr3(vcpu);
	if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
			     (CPU_BASED_CR3_LOAD_EXITING |
			      CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	} else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
			     ~(CPU_BASED_CR3_LOAD_EXITING |
			       CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
	}

	if (!(cr0 & X86_CR0_WP))
		*hw_cr0 &= ~X86_CR0_WP;
}

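/*
 * Install a new guest CR0, performing whatever mode transitions
 * (real/protected, long mode) and EPT adjustments the new value implies.
 */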
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long hw_cr0;

	if (enable_unrestricted_guest)
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
			| KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
	else
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;

	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (enable_ept)
		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);

	if (!vcpu->fpu_active)
		hw_cr0 |= X86_CR0_TS | X86_CR0_MP;

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;
	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
}

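/* Build the EPT pointer (EPTP) value for the given EPT root table. */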
static u64 construct_eptp(unsigned long root_hpa)
{
	u64 eptp;

	/* TODO: derive the memory type and guest address width from the
	 * VMX capability MSRs instead of using the defaults. */
	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
	eptp |= (root_hpa & PAGE_MASK);

	return eptp;
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3;
	u64 eptp;

	guest_cr3 = cr3;
	if (enable_ept) {
		eptp = construct_eptp(cr3);
		vmcs_write64(EPT_POINTER, eptp);
		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
			vcpu->kvm->arch.ept_identity_map_addr;
		ept_load_pdptrs(vcpu);
	}

	vmx_flush_tlb(vcpu);
	vmcs_writel(GUEST_CR3, guest_cr3);
}

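/*
 * Install a new guest CR4. Returns 1 to signal a fault if the guest sets
 * CR4.VMXE without nested VMX support, or clears it while in VMX operation.
 */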
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

	if (cr4 & X86_CR4_VMXE) {
		/*
		 * To use VMXON (and later other VMX instructions), a guest
		 * must first be able to turn on cr4.VMXE (see handle_vmon()).
		 * So basically the check on whether to allow nested VMX
		 * is here.
		 */
		if (!nested_vmx_allowed(vcpu))
			return 1;
	} else if (to_vmx(vcpu)->nested.vmxon)
		return 1;

	vcpu->arch.cr4 = cr4;
	if (enable_ept) {
		if (!is_paging(vcpu)) {
			hw_cr4 &= ~X86_CR4_PAE;
			hw_cr4 |= X86_CR4_PSE;
		} else if (!(cr4 & X86_CR4_PAE)) {
			hw_cr4 &= ~X86_CR4_PAE;
		}
	}

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);
	return 0;
}

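/*
 * Read a guest segment register into 'var', preferring the state saved at
 * the last vm86 transition when it still describes the active segment.
 */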
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_save_segment *save;
	u32 ar;

	if (vmx->rmode.vm86_active
	    && (seg == VCPU_SREG_TR || seg == VCPU_SREG_ES
		|| seg == VCPU_SREG_DS || seg == VCPU_SREG_FS
		|| seg == VCPU_SREG_GS)
	    && !emulate_invalid_guest_state) {
		switch (seg) {
		case VCPU_SREG_TR: save = &vmx->rmode.tr; break;
		case VCPU_SREG_ES: save = &vmx->rmode.es; break;
		case VCPU_SREG_DS: save = &vmx->rmode.ds; break;
		case VCPU_SREG_FS: save = &vmx->rmode.fs; break;
		case VCPU_SREG_GS: save = &vmx->rmode.gs; break;
		default: BUG();
		}
		var->selector = save->selector;
		var->base = save->base;
		var->limit = save->limit;
		ar = save->ar;
		if (seg == VCPU_SREG_TR
		    || var->selector == vmx_read_guest_seg_selector(vmx, seg))
			goto use_saved_rmode_seg;
	}
	var->base = vmx_read_guest_seg_base(vmx, seg);
	var->limit = vmx_read_guest_seg_limit(vmx, seg);
	var->selector = vmx_read_guest_seg_selector(vmx, seg);
	ar = vmx_read_guest_seg_ar(vmx, seg);
use_saved_rmode_seg:
	if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment s;

	if (to_vmx(vcpu)->rmode.vm86_active) {
		vmx_get_segment(vcpu, &s, seg);
		return s.base;
	}
	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}

static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	if (!is_protmode(vcpu))
		return 0;

	if (!is_long_mode(vcpu)
	    && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
		return 3;

	return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3;
}

static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
		to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu);
	}
	return to_vmx(vcpu)->cpl;
}

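/* Pack a struct kvm_segment into the VMCS access-rights (AR bytes) format. */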
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	if (var->unusable)
		ar = 1 << 16;
	else {
		ar = var->type & 15;
		ar |= (var->s & 1) << 4;
		ar |= (var->dpl & 3) << 5;
		ar |= (var->present & 1) << 7;
		ar |= (var->avl & 1) << 12;
		ar |= (var->l & 1) << 13;
		ar |= (var->db & 1) << 14;
		ar |= (var->g & 1) << 15;
	}
	if (ar == 0) /* a 0 value means unusable */
		ar = AR_UNUSABLE_MASK;

	return ar;
}

static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	vmx_segment_cache_clear(vmx);

	if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
		vmcs_write16(sf->selector, var->selector);
		vmx->rmode.tr.selector = var->selector;
		vmx->rmode.tr.base = var->base;
		vmx->rmode.tr.limit = var->limit;
		vmx->rmode.tr.ar = vmx_segment_access_rights(var);
		return;
	}
	vmcs_writel(sf->base, var->base);
	vmcs_write32(sf->limit, var->limit);
	vmcs_write16(sf->selector, var->selector);
	if (vmx->rmode.vm86_active && var->s) {
		/*
		 * Hack real-mode segments into vm86 compatibility.
		 */
		if (var->base == 0xffff0000 && var->selector == 0xf000)
			vmcs_writel(sf->base, 0xf0000);
		ar = 0xf3;
	} else
		ar = vmx_segment_access_rights(var);

	/*
	 * Fix the "Accessed" bit in AR field of segment registers for older
	 * qemu binaries.
	 * IA32 arch specifies that at the time of processor reset the
	 * "Accessed" bit in the AR field of segment registers is 1. And qemu
	 * is setting it to 0 in the userland code. This causes invalid guest
	 * state vmexit when "unrestricted guest" mode is turned on.
	 * Fix for this setup issue in cpu_reset is being pushed in the qemu
	 * tree. Newer qemu binaries with that qemu fix would not need this
	 * kvm hack.
	 */
	if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
		ar |= 0x1; /* Accessed */

	vmcs_write32(sf->ar_bytes, ar);
	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_IDTR_BASE, dt->address);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_GDTR_BASE, dt->address);
}

static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	u32 ar;

	vmx_get_segment(vcpu, &var, seg);
	ar = vmx_segment_access_rights(&var);

	if (var.base != (var.selector << 4))
		return false;
	if (var.limit != 0xffff)
		return false;
	if (ar != 0xf3)
		return false;

	return true;
}

static bool code_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	unsigned int cs_rpl;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs_rpl = cs.selector & SELECTOR_RPL_MASK;

	if (cs.unusable)
		return false;
	if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
		return false;
	if (!cs.s)
		return false;
	if (cs.type & AR_TYPE_WRITEABLE_MASK) {
		if (cs.dpl > cs_rpl)
			return false;
	} else {
		if (cs.dpl != cs_rpl)
			return false;
	}
	if (!cs.present)
		return false;

	/* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
	return true;
}

static bool stack_segment_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ss;
	unsigned int ss_rpl;

	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
	ss_rpl = ss.selector & SELECTOR_RPL_MASK;

	if (ss.unusable)
		return true;
	if (ss.type != 3 && ss.type != 7)
		return false;
	if (!ss.s)
		return false;
	if (ss.dpl != ss_rpl) /* DPL != RPL */
		return false;
	if (!ss.present)
		return false;

	return true;
}

static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment var;
	unsigned int rpl;

	vmx_get_segment(vcpu, &var, seg);
	rpl = var.selector & SELECTOR_RPL_MASK;

	if (var.unusable)
		return true;
	if (!var.s)
		return false;
	if (!var.present)
		return false;
	if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
		if (var.dpl < rpl) /* DPL < RPL */
			return false;
	}

	/* TODO: Add other members to kvm_segment_field to allow checking for other access
	 * rights flags
	 */
	return true;
}

static bool tr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment tr;

	vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);

	if (tr.unusable)
		return false;
	if (tr.selector & SELECTOR_TI_MASK)	/* TI = 1 */
		return false;
	if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
		return false;
	if (!tr.present)
		return false;

	return true;
}

static bool ldtr_valid(struct kvm_vcpu *vcpu)
{
	struct kvm_segment ldtr;

	vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);

	if (ldtr.unusable)
		return true;
	if (ldtr.selector & SELECTOR_TI_MASK)	/* TI = 1 */
		return false;
	if (ldtr.type != 2)
		return false;
	if (!ldtr.present)
		return false;

	return true;
}

static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ss;

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);

	return ((cs.selector & SELECTOR_RPL_MASK) ==
		 (ss.selector & SELECTOR_RPL_MASK));
}

/*
 * Check if guest state is valid. Returns true if valid, false if not.
 * We assume that registers are always usable.
 */
static bool guest_state_valid(struct kvm_vcpu *vcpu)
{
	/* real mode guest state checks */
	if (!is_protmode(vcpu)) {
		if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
	} else {
		/* protected mode guest state checks */
		if (!cs_ss_rpl_check(vcpu))
			return false;
		if (!code_segment_valid(vcpu))
			return false;
		if (!stack_segment_valid(vcpu))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_DS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_ES))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_FS))
			return false;
		if (!data_segment_valid(vcpu, VCPU_SREG_GS))
			return false;
		if (!tr_valid(vcpu))
			return false;
		if (!ldtr_valid(vcpu))
			return false;
	}
	/* TODO:
	 * - Add checks on RIP
	 * - Add checks on RFLAGS
	 */

	return true;
}

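/*
 * Initialize the three pages of the real-mode TSS: clear them, set the
 * I/O bitmap base, and terminate the I/O bitmap with an all-ones byte.
 * Returns 1 on success, 0 on failure.
 */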
static int init_rmode_tss(struct kvm *kvm)
{
	gfn_t fn;
	u16 data = 0;
	int r, idx, ret = 0;

	idx = srcu_read_lock(&kvm->srcu);
	fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
	r = kvm_write_guest_page(kvm, fn++, &data,
			TSS_IOPB_BASE_OFFSET, sizeof(u16));
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	data = ~0;
	r = kvm_write_guest_page(kvm, fn, &data,
				 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
				 sizeof(u8));
	if (r < 0)
		goto out;

	ret = 1;
out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

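/*
 * Populate the EPT identity pagetable with 4MB large-page entries that
 * identity-map the low 4GB, for use while the guest runs unpaged.
 * Returns 1 on success, 0 on failure.
 */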
static int init_rmode_identity_map(struct kvm *kvm)
{
	int i, idx, r, ret;
	pfn_t identity_map_pfn;
	u32 tmp;

	if (!enable_ept)
		return 1;
	if (unlikely(!kvm->arch.ept_identity_pagetable)) {
		printk(KERN_ERR "EPT: identity-mapping pagetable "
			"hasn't been allocated!\n");
		return 0;
	}
	if (likely(kvm->arch.ept_identity_pagetable_done))
		return 1;
	ret = 0;
	identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
	idx = srcu_read_lock(&kvm->srcu);
	r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
	if (r < 0)
		goto out;
	/* Set up identity-mapping pagetable for EPT in real mode */
	for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
		tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
			_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
		r = kvm_write_guest_page(kvm, identity_map_pfn,
				&tmp, i * sizeof(tmp), sizeof(tmp));
		if (r < 0)
			goto out;
	}
	kvm->arch.ept_identity_pagetable_done = true;
	ret = 1;
out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

static void seg_setup(int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	unsigned int ar;

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	if (enable_unrestricted_guest) {
		ar = 0x93;
		if (seg == VCPU_SREG_CS)
			ar |= 0x08; /* code segment */
	} else
		ar = 0xf3;

	vmcs_write32(sf->ar_bytes, ar);
}

static int alloc_apic_access_page(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_page)
		goto out;
	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
	kvm_userspace_mem.memory_size = PAGE_SIZE;
	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
	if (r)
		goto out;

	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int alloc_identity_pagetable(struct kvm *kvm)
{
	struct kvm_userspace_memory_region kvm_userspace_mem;
	int r = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.ept_identity_pagetable)
		goto out;
	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
	kvm_userspace_mem.flags = 0;
	kvm_userspace_mem.guest_phys_addr =
		kvm->arch.ept_identity_map_addr;
	kvm_userspace_mem.memory_size = PAGE_SIZE;
	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
	if (r)
		goto out;

	kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
			kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

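/* Assign the vcpu a VPID from the global bitmap; 0 (no VPID) if none is free. */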
static void allocate_vpid(struct vcpu_vmx *vmx)
{
	int vpid;

	vmx->vpid = 0;
	if (!enable_vpid)
		return;
	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS) {
		vmx->vpid = vpid;
		__set_bit(vpid, vmx_vpid_bitmap);
	}
	spin_unlock(&vmx_vpid_lock);
}

static void free_vpid(struct vcpu_vmx *vmx)
{
	if (!enable_vpid)
		return;
	spin_lock(&vmx_vpid_lock);
	if (vmx->vpid != 0)
		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}

static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return;

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		__clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
		__clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		__clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
		__clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
	}
}

static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
{
	if (!longmode_only)
		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
}

/*
 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
 * will not change in the lifetime of the guest.
 * Note that host-state that does change is set elsewhere. E.g., host-state
 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
 */
static void vmx_set_constant_host_state(void)
{
	u32 low32, high32;
	unsigned long tmpl;
	struct desc_ptr dt;

	vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS);  /* 22.2.3 */
	vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	native_store_idt(&dt);
	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */

	asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
	vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */

	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */

	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
		rdmsr(MSR_IA32_CR_PAT, low32, high32);
		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
	}
}

static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
	vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
	if (enable_ept)
		vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
	if (is_guest_mode(&vmx->vcpu))
		vmx->vcpu.arch.cr4_guest_owned_bits &=
			~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
}

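/* Compute the primary processor-based VM-execution controls for this vcpu. */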
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
	if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
		exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	}
	if (!enable_ept)
		exec_control |= CPU_BASED_CR3_STORE_EXITING |
				CPU_BASED_CR3_LOAD_EXITING  |
				CPU_BASED_INVLPG_EXITING;
	return exec_control;
}

static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
	if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (!ple_gap)
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	return exec_control;
}

/*
 * Sets up the vmcs for emulated real mode.
 */
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
	unsigned long a;
	int i;

	/* I/O */
	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
	vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));

	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */

	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		vmcs_config.pin_based_exec_ctrl);

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls()) {
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
				vmx_secondary_exec_control(vmx));
	}

	if (ple_gap) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmcs_write32(PLE_WINDOW, ple_window);
	}

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
	vmx_set_constant_host_state();
#ifdef CONFIG_X86_64
	rdmsrl(MSR_FS_BASE, a);
	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
	rdmsrl(MSR_GS_BASE, a);
	vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		u32 msr_low, msr_high;
		u64 host_pat;
		rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
		host_pat = msr_low | ((u64) msr_high << 32);
		/* Write the default value following the host PAT */
		vmcs_write64(GUEST_IA32_PAT, host_pat);
		/* Keep arch.pat in sync with GUEST_IA32_PAT */
		vmx->vcpu.arch.pat = host_pat;
	}

	for (i = 0; i < NR_VMX_MSR; ++i) {
		u32 index = vmx_msr_index[i];
		u32 data_low, data_high;
		int j = vmx->nmsrs;

		if (rdmsr_safe(index, &data_low, &data_high) < 0)
			continue;
		if (wrmsr_safe(index, data_low, data_high) < 0)
			continue;
		vmx->guest_msrs[j].index = i;
		vmx->guest_msrs[j].data = 0;
		vmx->guest_msrs[j].mask = -1ull;
		++vmx->nmsrs;
	}

	vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);

	/* 22.2.1, 20.8.1 */
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);

	vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
	set_cr4_guest_host_mask(vmx);

	kvm_write_tsc(&vmx->vcpu, 0);

	return 0;
}

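/*
 * Reset the vcpu to its power-on state: real mode, with the BSP starting
 * at the (vm86-compatible) reset vector and APs at their SIPI vector.
 */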
static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 msr;
	int ret;

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));

	vmx->rmode.vm86_active = 0;

	vmx->soft_vnmi_blocked = 0;

	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
	kvm_set_cr8(&vmx->vcpu, 0);
	msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_bsp(&vmx->vcpu))
		msr |= MSR_IA32_APICBASE_BSP;
	kvm_set_apic_base(&vmx->vcpu, msr);

	ret = fx_init(&vmx->vcpu);
	if (ret != 0)
		goto out;

	vmx_segment_cache_clear(vmx);

	seg_setup(VCPU_SREG_CS);
	/*
	 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
	 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
	 */
	if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
		vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
		vmcs_writel(GUEST_CS_BASE, 0x000f0000);
	} else {
		vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
		vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
	}

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);

	vmcs_writel(GUEST_RFLAGS, 0x02);
	if (kvm_vcpu_is_bsp(&vmx->vcpu))
		kvm_rip_write(vcpu, 0xfff0);
	else
		kvm_rip_write(vcpu, 0);
	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);

	vmcs_writel(GUEST_DR7, 0x400);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);

	/* Special registers */
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	setup_msrs(vmx);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

	if (cpu_has_vmx_tpr_shadow()) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (vm_need_tpr_shadow(vmx->vcpu.kvm))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				     __pa(vmx->vcpu.arch.apic->regs));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
		vmcs_write64(APIC_ACCESS_ADDR,
			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));

	if (vmx->vpid != 0)
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

	vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
	vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
	vmx_set_cr4(&vmx->vcpu, 0);
	vmx_set_efer(&vmx->vcpu, 0);
	vmx_fpu_activate(&vmx->vcpu);
	update_exception_bitmap(&vmx->vcpu);

	vpid_sync_context(vmx);

	ret = 0;

	/* HACK: Don't enable emulation on guest boot/reset */
	vmx->emulation_required = 0;

out:
	return ret;
}

static void enable_irq_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	if (!cpu_has_virtual_nmis()) {
		enable_irq_window(vcpu);
		return;
	}

	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
		enable_irq_window(vcpu);
		return;
	}
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}

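/*
 * Inject the pending external or soft interrupt, either through the
 * VM-entry interruption-information field or, in vm86 mode, by emulating
 * real-mode IDT vectoring.
 */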
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003759static void vmx_inject_irq(struct kvm_vcpu *vcpu)
Eddie Dong85f455f2007-07-06 12:20:49 +03003760{
Avi Kivity9c8cba32007-11-22 11:42:59 +02003761 struct vcpu_vmx *vmx = to_vmx(vcpu);
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003762 uint32_t intr;
3763 int irq = vcpu->arch.interrupt.nr;
Avi Kivity9c8cba32007-11-22 11:42:59 +02003764
Marcelo Tosatti229456f2009-06-17 09:22:14 -03003765 trace_kvm_inj_virq(irq);
Feng (Eric) Liu2714d1d2008-04-10 15:31:10 -04003766
Avi Kivityfa89a812008-09-01 15:57:51 +03003767 ++vcpu->stat.irq_injections;
Avi Kivity7ffd92c2009-06-09 14:10:45 +03003768 if (vmx->rmode.vm86_active) {
Serge E. Hallyn71f98332011-04-13 09:12:54 -05003769 int inc_eip = 0;
3770 if (vcpu->arch.interrupt.soft)
3771 inc_eip = vcpu->arch.event_exit_inst_len;
3772 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
Mohammed Gamala92601b2010-09-19 14:34:07 +02003773 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003774 return;
3775 }
Gleb Natapov66fd3f72009-05-11 13:35:50 +03003776 intr = irq | INTR_INFO_VALID_MASK;
3777 if (vcpu->arch.interrupt.soft) {
3778 intr |= INTR_TYPE_SOFT_INTR;
3779 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
3780 vmx->vcpu.arch.event_exit_inst_len);
3781 } else
3782 intr |= INTR_TYPE_EXT_INTR;
3783 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
Anthony Liguori443381a2010-12-06 10:53:38 -06003784 vmx_clear_hlt(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03003785}

static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!cpu_has_virtual_nmis()) {
		/*
		 * Tracking the NMI-blocked state in software is built upon
		 * finding the next open IRQ window. This, in turn, depends on
		 * well-behaving guests: They have to keep IRQs disabled at
		 * least as long as the NMI handler runs. Otherwise we may
		 * cause NMI nesting, maybe breaking the guest. But as this is
		 * highly unlikely, we can live with the residual risk.
		 */
		vmx->soft_vnmi_blocked = 1;
		vmx->vnmi_blocked_time = 0;
	}

	++vcpu->stat.nmi_injections;
	vmx->nmi_known_unmasked = false;
	if (vmx->rmode.vm86_active) {
		if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
	vmx_clear_hlt(vcpu);
}

static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
	if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
		return 0;

	return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		 (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
		  | GUEST_INTR_STATE_NMI));
}

static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	if (!cpu_has_virtual_nmis())
		return to_vmx(vcpu)->soft_vnmi_blocked;
	if (to_vmx(vcpu)->nmi_known_unmasked)
		return false;
	return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
}

static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!cpu_has_virtual_nmis()) {
		if (vmx->soft_vnmi_blocked != masked) {
			vmx->soft_vnmi_blocked = masked;
			vmx->vnmi_blocked_time = 0;
		}
	} else {
		vmx->nmi_known_unmasked = !masked;
		if (masked)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
	}
}

static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
		!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
			(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}

static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	int ret;
	struct kvm_userspace_memory_region tss_mem = {
		.slot = TSS_PRIVATE_MEMSLOT,
		.guest_phys_addr = addr,
		.memory_size = PAGE_SIZE * 3,
		.flags = 0,
	};

	ret = kvm_set_memory_region(kvm, &tss_mem, 0);
	if (ret)
		return ret;
	kvm->arch.tss_addr = addr;
	if (!init_rmode_tss(kvm))
		return -ENOMEM;

	return 0;
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * Instructions with an address-size override prefix (opcode 0x67)
	 * cause a #SS fault with error code 0 in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
		if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
			return 1;
	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	switch (vec) {
	case DB_VECTOR:
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return 0;
		kvm_queue_exception(vcpu, vec);
		return 1;
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return 0;
		/* fall through */
	case DE_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		kvm_queue_exception(vcpu, vec);
		return 1;
	}
	return 0;
}

/*
 * Trigger machine check on the host. We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs, 0);
#endif
}

static int handle_machine_check(struct kvm_vcpu *vcpu)
{
	/* already handled by vcpu_run */
	return 1;
}

static int handle_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_run *kvm_run = vcpu->run;
	u32 intr_info, ex_no, error_code;
	unsigned long cr2, rip, dr6;
	u32 vect_info;
	enum emulation_result er;

	vect_info = vmx->idt_vectoring_info;
	intr_info = vmx->exit_intr_info;

	if (is_machine_check(intr_info))
		return handle_machine_check(vcpu);

	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
	    !is_page_fault(intr_info)) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
		vcpu->run->internal.ndata = 2;
		vcpu->run->internal.data[0] = vect_info;
		vcpu->run->internal.data[1] = intr_info;
		return 0;
	}

	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
		return 1;  /* already handled by vmx_vcpu_run() */

	if (is_no_device(intr_info)) {
		vmx_fpu_activate(vcpu);
		return 1;
	}

	if (is_invalid_opcode(intr_info)) {
		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
		if (er != EMULATE_DONE)
			kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	error_code = 0;
	if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
		error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	if (is_page_fault(intr_info)) {
		/* EPT won't cause page fault directly */
		if (enable_ept)
			BUG();
		cr2 = vmcs_readl(EXIT_QUALIFICATION);
		trace_kvm_page_fault(cr2, error_code);

		if (kvm_event_needs_reinjection(vcpu))
			kvm_mmu_unprotect_page_virt(vcpu, cr2);
		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
	}

	if (vmx->rmode.vm86_active &&
	    handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
				   error_code)) {
		if (vcpu->arch.halt_request) {
			vcpu->arch.halt_request = 0;
			return kvm_emulate_halt(vcpu);
		}
		return 1;
	}

	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
	switch (ex_no) {
	case DB_VECTOR:
		dr6 = vmcs_readl(EXIT_QUALIFICATION);
		if (!(vcpu->guest_debug &
		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
			vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
			kvm_queue_exception(vcpu, DB_VECTOR);
			return 1;
		}
		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
		/* fall through */
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject #BP from
		 * user space while in guest debugging mode. Reading it for
		 * #DB as well causes no harm, it is not used in that case.
		 */
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		rip = kvm_rip_read(vcpu);
		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
		kvm_run->debug.arch.exception = ex_no;
		break;
	default:
		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
		kvm_run->ex.exception = ex_no;
		kvm_run->ex.error_code = error_code;
		break;
	}
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.irq_exits;
	return 1;
}

static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_io(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int size, in, string;
	unsigned port;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	string = (exit_qualification & 16) != 0;
	in = (exit_qualification & 8) != 0;

	++vcpu->stat.io_exits;

	if (string || in)
		return emulate_instruction(vcpu, 0) == EMULATE_DONE;

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;
	skip_emulated_instruction(vcpu);

	return kvm_fast_pio_out(vcpu, size, port);
}
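
/*
 * Illustration, not part of the build: handle_io() above picks apart the
 * I/O exit qualification with plain masks and shifts (Intel SDM Vol. 3B,
 * "Exit Qualification for I/O Instructions"). A minimal userspace sketch
 * of the same decode follows; the sample value is fabricated.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t q = 0x03f80000;		/* hypothetical: OUT to port 0x3f8 */
	int size = (q & 7) + 1;			/* bits 2:0 hold size - 1 */
	int in = (q & 8) != 0;			/* bit 3: 1 = IN, 0 = OUT */
	int string = (q & 16) != 0;		/* bit 4: INS/OUTS */
	unsigned port = (q >> 16) & 0xffff;	/* bits 31:16: port number */

	printf("%s port 0x%x, size %d%s\n", in ? "in" : "out",
	       port, size, string ? " (string)" : "");
	return 0;
}
#endif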

static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
	/*
	 * Patch in the VMCALL instruction:
	 */
	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xc1;
}

static int handle_cr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification, val;
	int cr;
	int reg;
	int err;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	cr = exit_qualification & 15;
	reg = (exit_qualification >> 8) & 15;
	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		val = kvm_register_read(vcpu, reg);
		trace_kvm_cr_write(cr, val);
		switch (cr) {
		case 0:
			err = kvm_set_cr0(vcpu, val);
			kvm_complete_insn_gp(vcpu, err);
			return 1;
		case 3:
			err = kvm_set_cr3(vcpu, val);
			kvm_complete_insn_gp(vcpu, err);
			return 1;
		case 4:
			err = kvm_set_cr4(vcpu, val);
			kvm_complete_insn_gp(vcpu, err);
			return 1;
		case 8: {
			u8 cr8_prev = kvm_get_cr8(vcpu);
			u8 cr8 = kvm_register_read(vcpu, reg);
			err = kvm_set_cr8(vcpu, cr8);
			kvm_complete_insn_gp(vcpu, err);
			if (irqchip_in_kernel(vcpu->kvm))
				return 1;
			if (cr8_prev <= cr8)
				return 1;
			vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
			return 0;
		}
		}
		break;
	case 2: /* clts */
		vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
		skip_emulated_instruction(vcpu);
		vmx_fpu_activate(vcpu);
		return 1;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			val = kvm_read_cr3(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			skip_emulated_instruction(vcpu);
			return 1;
		case 8:
			val = kvm_get_cr8(vcpu);
			kvm_register_write(vcpu, reg, val);
			trace_kvm_cr_read(cr, val);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		break;
	case 3: /* lmsw */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
		kvm_lmsw(vcpu, val);

		skip_emulated_instruction(vcpu);
		return 1;
	default:
		break;
	}
	vcpu->run->exit_reason = 0;
	pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
		  (int)(exit_qualification >> 4) & 3, cr);
	return 0;
}
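
/*
 * Illustration, not part of the build: the CR-access exit qualification
 * decoded by handle_cr() packs the CR number in bits 3:0, the access type
 * in bits 5:4, the GP register in bits 11:8 and the LMSW source data in
 * bits 31:16. A userspace sketch with fabricated sample values:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void decode_cr_exit(uint64_t q)
{
	static const char *types[] = {
		"mov to cr", "mov from cr", "clts", "lmsw"
	};
	unsigned cr = q & 15;
	unsigned type = (q >> 4) & 3;
	unsigned reg = (q >> 8) & 15;

	printf("%s cr%u (reg %u)", types[type], cr, reg);
	if (type == 3)
		printf(", lmsw data 0x%x", (unsigned)((q >> 16) & 0xffff));
	printf("\n");
}

int main(void)
{
	decode_cr_exit(0x00000003);	/* mov to cr3 from reg 0 */
	decode_cr_exit(0x00060030);	/* lmsw with source data 0x6 */
	return 0;
}
#endif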

static int handle_dr(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	int dr, reg;

	/* Do not handle if the CPL > 0, will trigger GP on re-entry */
	if (!kvm_require_cpl(vcpu, 0))
		return 1;
	dr = vmcs_readl(GUEST_DR7);
	if (dr & DR7_GD) {
		/*
		 * As the vm-exit takes precedence over the debug trap, we
		 * need to emulate the latter, either for the host or the
		 * guest debugging itself.
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
			vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
			vcpu->run->debug.arch.dr7 = dr;
			vcpu->run->debug.arch.pc =
				vmcs_readl(GUEST_CS_BASE) +
				vmcs_readl(GUEST_RIP);
			vcpu->run->debug.arch.exception = DB_VECTOR;
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			return 0;
		} else {
			vcpu->arch.dr7 &= ~DR7_GD;
			vcpu->arch.dr6 |= DR6_BD;
			vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
			kvm_queue_exception(vcpu, DB_VECTOR);
			return 1;
		}
	}

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
	if (exit_qualification & TYPE_MOV_FROM_DR) {
		unsigned long val;
		if (!kvm_get_dr(vcpu, dr, &val))
			kvm_register_write(vcpu, reg, val);
	} else
		kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
	skip_emulated_instruction(vcpu);
	return 1;
}
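
/*
 * Illustration, not part of the build: the MOV-DR exit qualification read
 * above keeps the DR number in bits 2:0 (DEBUG_REG_ACCESS_NUM), the
 * direction in bit 4 (TYPE_MOV_FROM_DR) and the GP register in bits 11:8
 * (DEBUG_REG_ACCESS_REG). Userspace sketch, sample value fabricated:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t q = 0x316;	/* hypothetical exit qualification */

	printf("dr%u, %s, gp reg %u\n",
	       (unsigned)(q & 7),
	       (q & (1 << 4)) ? "read (mov from dr)" : "write (mov to dr)",
	       (unsigned)((q >> 8) & 0xf));
	return 0;
}
#endif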

static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
	vmcs_writel(GUEST_DR7, val);
}

static int handle_cpuid(struct kvm_vcpu *vcpu)
{
	kvm_emulate_cpuid(vcpu);
	return 1;
}

static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 data;

	if (vmx_get_msr(vcpu, ecx, &data)) {
		trace_kvm_msr_read_ex(ecx);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_read(ecx, data);

	/* FIXME: handling of bits 32:63 of rax, rdx */
	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
	skip_emulated_instruction(vcpu);
	return 1;
}
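
/*
 * Illustration, not part of the build: handle_rdmsr() splits the 64-bit
 * MSR value across RAX/RDX exactly as handle_wrmsr() below reassembles
 * it. A round-trip check in userspace (the MSR value is made up):
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t data = 0x123456789abcdef0ull;
	uint32_t eax = data & -1u;		/* low 32 bits -> RAX */
	uint32_t edx = (data >> 32) & -1u;	/* high 32 bits -> RDX */
	uint64_t back = (uint64_t)eax | ((uint64_t)edx << 32);

	assert(back == data);
	printf("eax=%#x edx=%#x -> %#llx\n", eax, edx,
	       (unsigned long long)back);
	return 0;
}
#endif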

static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
	u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
		| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);

	if (vmx_set_msr(vcpu, ecx, data) != 0) {
		trace_kvm_msr_write_ex(ecx, data);
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	trace_kvm_msr_write(ecx, data);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 1;
}

static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	/* clear pending irq */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	++vcpu->stat.irq_window_exits;

	/*
	 * If the user space waits to inject interrupts, exit as soon as
	 * possible
	 */
	if (!irqchip_in_kernel(vcpu->kvm) &&
	    vcpu->run->request_interrupt_window &&
	    !kvm_cpu_has_interrupt(vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		return 0;
	}
	return 1;
}

static int handle_halt(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	return kvm_emulate_halt(vcpu);
}

static int handle_vmcall(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	kvm_emulate_hypercall(vcpu);
	return 1;
}

static int handle_invd(struct kvm_vcpu *vcpu)
{
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_invlpg(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	kvm_mmu_invlpg(vcpu, exit_qualification);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	kvm_emulate_wbinvd(vcpu);
	return 1;
}

static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
	u64 new_bv = kvm_read_edx_eax(vcpu);
	u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);

	if (kvm_set_xcr(vcpu, index, new_bv) == 0)
		skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_apic_access(struct kvm_vcpu *vcpu)
{
	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}

static int handle_task_switch(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qualification;
	bool has_error_code = false;
	u32 error_code = 0;
	u16 tss_selector;
	int reason, type, idt_v;

	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	reason = (u32)exit_qualification >> 30;
	if (reason == TASK_SWITCH_GATE && idt_v) {
		switch (type) {
		case INTR_TYPE_NMI_INTR:
			vcpu->arch.nmi_injected = false;
			vmx_set_nmi_mask(vcpu, true);
			break;
		case INTR_TYPE_EXT_INTR:
		case INTR_TYPE_SOFT_INTR:
			kvm_clear_interrupt_queue(vcpu);
			break;
		case INTR_TYPE_HARD_EXCEPTION:
			if (vmx->idt_vectoring_info &
			    VECTORING_INFO_DELIVER_CODE_MASK) {
				has_error_code = true;
				error_code =
					vmcs_read32(IDT_VECTORING_ERROR_CODE);
			}
			/* fall through */
		case INTR_TYPE_SOFT_EXCEPTION:
			kvm_clear_exception_queue(vcpu);
			break;
		default:
			break;
		}
	}
	tss_selector = exit_qualification;

	if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
		       type != INTR_TYPE_EXT_INTR &&
		       type != INTR_TYPE_NMI_INTR))
		skip_emulated_instruction(vcpu);

	if (kvm_task_switch(vcpu, tss_selector, reason,
			    has_error_code, error_code) == EMULATE_FAIL) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		vcpu->run->internal.ndata = 0;
		return 0;
	}

	/* clear all local breakpoint enable flags */
	vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);

	/*
	 * TODO: What about debug traps on tss switch?
	 *       Are we supposed to inject them and update dr6?
	 */

	return 1;
}

static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification;
	gpa_t gpa;
	int gla_validity;

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	if (exit_qualification & (1 << 6)) {
		printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
		return -EINVAL;
	}

	gla_validity = (exit_qualification >> 7) & 0x3;
	if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
		printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
		printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
		       (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
		       vmcs_readl(GUEST_LINEAR_ADDRESS));
		printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
		       (long unsigned int)exit_qualification);
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
		return 0;
	}

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
	trace_kvm_page_fault(gpa, exit_qualification);
	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
}

static u64 ept_rsvd_mask(u64 spte, int level)
{
	int i;
	u64 mask = 0;

	for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
		mask |= (1ULL << i);

	if (level > 2)
		/* bits 7:3 reserved */
		mask |= 0xf8;
	else if (level == 2) {
		if (spte & (1ULL << 7))
			/* 2MB ref, bits 20:12 reserved */
			mask |= 0x1ff000;
		else
			/* bits 6:3 reserved */
			mask |= 0x78;
	}

	return mask;
}
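
/*
 * Illustration, not part of the build: a userspace re-implementation of
 * ept_rsvd_mask() with the physical-address width passed in explicitly
 * (the kernel reads it from boot_cpu_data.x86_phys_bits). The sample run
 * assumes a CPU with 36 physical address bits.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t ept_rsvd_mask(uint64_t spte, int level, int phys_bits)
{
	uint64_t mask = 0;
	int i;

	for (i = 51; i > phys_bits; i--)
		mask |= 1ULL << i;
	if (level > 2)
		mask |= 0xf8;			/* bits 7:3 reserved */
	else if (level == 2) {
		if (spte & (1ULL << 7))
			mask |= 0x1ff000;	/* 2MB page: bits 20:12 */
		else
			mask |= 0x78;		/* bits 6:3 reserved */
	}
	return mask;
}

int main(void)
{
	/* A level-2 entry with bit 7 set, i.e. a 2MB mapping. */
	printf("mask = %#llx\n",
	       (unsigned long long)ept_rsvd_mask(1ULL << 7, 2, 36));
	return 0;
}
#endif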

static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
				       int level)
{
	printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);

	/* 010b (write-only) */
	WARN_ON((spte & 0x7) == 0x2);

	/* 110b (write/execute) */
	WARN_ON((spte & 0x7) == 0x6);

	/* 100b (execute-only) and value not supported by logical processor */
	if (!cpu_has_vmx_ept_execute_only())
		WARN_ON((spte & 0x7) == 0x4);

	/* not 000b */
	if ((spte & 0x7)) {
		u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);

		if (rsvd_bits != 0) {
			printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
			       __func__, rsvd_bits);
			WARN_ON(1);
		}

		if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
			u64 ept_mem_type = (spte & 0x38) >> 3;

			if (ept_mem_type == 2 || ept_mem_type == 3 ||
			    ept_mem_type == 7) {
				printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
				       __func__, ept_mem_type);
				WARN_ON(1);
			}
		}
	}
}

static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
	u64 sptes[4];
	int nr_sptes, i;
	gpa_t gpa;

	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);

	printk(KERN_ERR "EPT: Misconfiguration.\n");
	printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);

	nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);

	for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
		ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);

	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
	vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;

	return 0;
}

static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
	u32 cpu_based_vm_exec_control;

	/* clear pending NMI */
	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
	++vcpu->stat.nmi_window_exits;
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 1;
}

static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	enum emulation_result err = EMULATE_DONE;
	int ret = 1;
	u32 cpu_exec_ctrl;
	bool intr_window_requested;

	cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;

	while (!guest_state_valid(vcpu)) {
		if (intr_window_requested
		    && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
			return handle_interrupt_window(&vmx->vcpu);

		err = emulate_instruction(vcpu, 0);

		if (err == EMULATE_DO_MMIO) {
			ret = 0;
			goto out;
		}

		if (err != EMULATE_DONE)
			return 0;

		if (signal_pending(current))
			goto out;
		if (need_resched())
			schedule();
	}

	vmx->emulation_required = 0;
out:
	return ret;
}

/*
 * Indicate a busy-waiting vcpu in a spinlock. We do not enable PAUSE
 * exiting, so we only get here on CPUs with PAUSE-loop exiting.
 */
static int handle_pause(struct kvm_vcpu *vcpu)
{
	skip_emulated_instruction(vcpu);
	kvm_vcpu_on_spin(vcpu);

	return 1;
}

static int handle_invalid_op(struct kvm_vcpu *vcpu)
{
	kvm_queue_exception(vcpu, UD_VECTOR);
	return 1;
}

/*
 * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
 * We could reuse a single VMCS for all the L2 guests, but we also want the
 * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
 * allows keeping them loaded on the processor, and in the future will allow
 * optimizations where prepare_vmcs02 doesn't need to set all the fields on
 * every entry if they never change.
 * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
 * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
 *
 * The following functions allocate and free a vmcs02 in this pool.
 */

/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item;
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmx->nested.current_vmptr) {
			list_move(&item->list, &vmx->nested.vmcs02_pool);
			return &item->vmcs02;
		}

	if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
		/* Recycle the least recently used VMCS. */
		item = list_entry(vmx->nested.vmcs02_pool.prev,
			struct vmcs02_list, list);
		item->vmptr = vmx->nested.current_vmptr;
		list_move(&item->list, &vmx->nested.vmcs02_pool);
		return &item->vmcs02;
	}

	/* Create a new VMCS */
	item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
	if (!item)
		return NULL;
	item->vmcs02.vmcs = alloc_vmcs();
	if (!item->vmcs02.vmcs) {
		kfree(item);
		return NULL;
	}
	loaded_vmcs_init(&item->vmcs02);
	item->vmptr = vmx->nested.current_vmptr;
	list_add(&item->list, &vmx->nested.vmcs02_pool);
	vmx->nested.vmcs02_num++;
	return &item->vmcs02;
}
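
/*
 * Illustration, not part of the build: the pool above is a small
 * most-recently-used cache keyed by vmptr. A stripped-down userspace
 * model with a singly linked list (all names are invented, and the
 * kernel's list_head/list_move machinery is replaced by hand-rolled
 * pointer surgery):
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 4

struct entry {
	uint64_t vmptr;
	struct entry *next;
};

static struct entry *pool;	/* most recently used first */
static int pool_num;

static struct entry *pool_get(uint64_t vmptr)
{
	struct entry **pp, *e;

	/* Hit: unlink and move to the front. */
	for (pp = &pool; (e = *pp) != NULL; pp = &e->next)
		if (e->vmptr == vmptr) {
			*pp = e->next;
			e->next = pool;
			return pool = e;
		}

	if (pool_num >= POOL_SIZE) {
		/* Recycle the least recently used entry (the tail). */
		for (pp = &pool; (*pp)->next; pp = &(*pp)->next)
			;
		e = *pp;
		*pp = NULL;
	} else {
		e = malloc(sizeof(*e));
		pool_num++;
	}
	e->vmptr = vmptr;
	e->next = pool;
	return pool = e;
}

int main(void)
{
	pool_get(0x1000);
	pool_get(0x2000);
	pool_get(0x1000);	/* hit: 0x1000 moves back to the front */
	printf("front: %#llx (num=%d)\n",
	       (unsigned long long)pool->vmptr, pool_num);
	return 0;
}
#endif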

/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	struct vmcs02_list *item;
	list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
		if (item->vmptr == vmptr) {
			free_loaded_vmcs(&item->vmcs02);
			list_del(&item->list);
			kfree(item);
			vmx->nested.vmcs02_num--;
			return;
		}
}

/*
 * Free all VMCSs saved for this vcpu, except the one pointed to by
 * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
 * currently used, if running L2), and vmcs01 when running L2.
 */
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{
	struct vmcs02_list *item, *n;
	list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
		if (vmx->loaded_vmcs != &item->vmcs02)
			free_loaded_vmcs(&item->vmcs02);
		list_del(&item->list);
		kfree(item);
	}
	vmx->nested.vmcs02_num = 0;

	if (vmx->loaded_vmcs != &vmx->vmcs01)
		free_loaded_vmcs(&vmx->vmcs01);
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument differs from the VMXON pointer (which the spec says they should).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* The Intel VMX Instruction Reference lists a bunch of bits that
	 * are prerequisite to running VMXON, most notably cr4.VMXE must be
	 * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD. We test these now:
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
	    (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	if (is_long_mode(vcpu) && !cs.l) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	INIT_LIST_HEAD(&vmx->nested.vmcs02_pool);
	vmx->nested.vmcs02_num = 0;

	vmx->nested.vmxon = true;

	skip_emulated_instruction(vcpu);
	return 1;
}

/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
	if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
	    (is_long_mode(vcpu) && !cs.l)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct vcpu_vmx *vmx)
{
	if (!vmx->nested.vmxon)
		return;
	vmx->nested.vmxon = false;
	if (vmx->nested.current_vmptr != -1ull) {
		kunmap(vmx->nested.current_vmcs12_page);
		nested_release_page(vmx->nested.current_vmcs12_page);
		vmx->nested.current_vmptr = -1ull;
		vmx->nested.current_vmcs12 = NULL;
	}
	/* Unpin physical memory we referred to in current vmcs02 */
	if (vmx->nested.apic_access_page) {
		nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}

	nested_free_all_saved_vmcss(vmx);
}

/* Emulate the VMXOFF instruction */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
	if (!nested_vmx_check_permission(vcpu))
		return 1;
	free_nested(to_vmx(vcpu));
	skip_emulated_instruction(vcpu);
	return 1;
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on a
 * VM exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and injects
 * a #UD or #GP exception.
 */
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
			       unsigned long exit_qualification,
			       u32 vmx_instruction_info, gva_t *ret)
{
	/*
	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
	 * Execution", on an exit, vmx_instruction_info holds most of the
	 * addressing components of the operand. Only the displacement part
	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
	 * For how an actual address is calculated from all these components,
	 * refer to Vol. 1, "Operand Addressing".
	 */
	int scaling = vmx_instruction_info & 3;
	int addr_size = (vmx_instruction_info >> 7) & 7;
	bool is_reg = vmx_instruction_info & (1u << 10);
	int seg_reg = (vmx_instruction_info >> 15) & 7;
	int index_reg = (vmx_instruction_info >> 18) & 0xf;
	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
	int base_reg = (vmx_instruction_info >> 23) & 0xf;
	bool base_is_valid = !(vmx_instruction_info & (1u << 27));

	if (is_reg) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
	*ret = vmx_get_segment_base(vcpu, seg_reg);
	if (base_is_valid)
		*ret += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
		*ret += kvm_register_read(vcpu, index_reg) << scaling;
	*ret += exit_qualification; /* holds the displacement */

	if (addr_size == 1) /* 32 bit */
		*ret &= 0xffffffff;

	/*
	 * TODO: throw #GP (and return 1) in various cases that the VM*
	 * instructions require it - e.g., offset beyond segment limit,
	 * unusable or unreadable/unwritable segment, non-canonical 64-bit
	 * address, and so on. Currently these are not checked.
	 */
	return 0;
}
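
/*
 * Illustration, not part of the build: the bit layout of
 * vmx_instruction_info consumed above, decoded in userspace. The sample
 * encoding is assembled by hand and is entirely hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t info = 0;

	info |= 2;		/* bits 1:0  - scaling: index * 4 */
	info |= 1 << 7;		/* bits 9:7  - address size: 1 = 32 bit */
				/* bit 10 clear - memory operand */
	info |= 3 << 15;	/* bits 17:15 - segment register 3 (DS) */
	info |= 1 << 18;	/* bits 21:18 - index register 1 */
				/* bit 22 clear - index valid */
	info |= 5 << 23;	/* bits 26:23 - base register 5 */
				/* bit 27 clear - base valid */

	printf("scaling=%u addr_size=%u is_reg=%u seg=%u\n",
	       info & 3, (info >> 7) & 7, (info >> 10) & 1,
	       (info >> 15) & 7);
	printf("index=%u (valid=%u) base=%u (valid=%u)\n",
	       (info >> 18) & 0xf, !(info & (1u << 22)),
	       (info >> 23) & 0xf, !(info & (1u << 27)));
	return 0;
}
#endif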

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction, as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions".
 */
static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}

static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
}

static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
					u32 vm_instruction_error)
{
	if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
		/*
		 * failValid writes the error number to the current VMCS, which
		 * can't be done if there isn't a current VMCS.
		 */
		nested_vmx_failInvalid(vcpu);
		return;
	}
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
}
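
/*
 * Illustration, not part of the build: the three helpers above only
 * manipulate six RFLAGS bits. A userspace sketch of the same arithmetic
 * (flag values copied from the architecture; the starting RFLAGS value
 * is arbitrary):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define CF 0x0001
#define PF 0x0004
#define AF 0x0010
#define ZF 0x0040
#define SF 0x0080
#define OF 0x0800
#define VMX_FLAGS (CF | PF | AF | ZF | SF | OF)

int main(void)
{
	uint64_t rflags = 0x8d7;	/* hypothetical starting value */

	/* VMsucceed: all six flags cleared. */
	printf("succeed:     %#llx\n",
	       (unsigned long long)(rflags & ~VMX_FLAGS));
	/* VMfailInvalid: all cleared, CF set. */
	printf("failInvalid: %#llx\n",
	       (unsigned long long)((rflags & ~VMX_FLAGS) | CF));
	/* VMfailValid: all cleared, ZF set; error code goes in the VMCS. */
	printf("failValid:   %#llx\n",
	       (unsigned long long)((rflags & ~VMX_FLAGS) | ZF));
	return 0;
}
#endif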

/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gva_t gva;
	gpa_t vmptr;
	struct vmcs12 *vmcs12;
	struct page *page;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
		return 1;

	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
				sizeof(vmptr), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	if (vmptr == vmx->nested.current_vmptr) {
		kunmap(vmx->nested.current_vmcs12_page);
		nested_release_page(vmx->nested.current_vmcs12_page);
		vmx->nested.current_vmptr = -1ull;
		vmx->nested.current_vmcs12 = NULL;
	}

	page = nested_get_page(vcpu, vmptr);
	if (page == NULL) {
		/*
		 * For accurate processor emulation, VMCLEAR beyond available
		 * physical memory should do nothing at all. However, it is
		 * possible that a nested vmx bug, not a guest hypervisor bug,
		 * resulted in this case, so let's shut down before doing any
		 * more damage:
		 */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return 1;
	}
	vmcs12 = kmap(page);
	vmcs12->launch_state = 0;
	kunmap(page);
	nested_release_page(page);

	nested_free_vmcs02(vmx, vmptr);

	skip_emulated_instruction(vcpu);
	nested_vmx_succeed(vcpu);
	return 1;
}

static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);

/* Emulate the VMLAUNCH instruction */
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, true);
}

/* Emulate the VMRESUME instruction */
static int handle_vmresume(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, false);
}

enum vmcs_field_type {
	VMCS_FIELD_TYPE_U16 = 0,
	VMCS_FIELD_TYPE_U64 = 1,
	VMCS_FIELD_TYPE_U32 = 2,
	VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
};

static inline int vmcs_field_type(unsigned long field)
{
	if (0x1 & field)	/* the *_HIGH fields are all 32 bit */
		return VMCS_FIELD_TYPE_U32;
	return (field >> 13) & 0x3;
}

static inline int vmcs_field_readonly(unsigned long field)
{
	return (((field >> 10) & 0x3) == 1);
}
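
/*
 * Illustration, not part of the build: the field-encoding bits consulted
 * by vmcs_field_type() and vmcs_field_readonly() above, applied to a few
 * well-known encodings from the SDM appendix (0x4402 = VM_EXIT_REASON,
 * 0x681e = GUEST_RIP, 0x2800 = VMCS_LINK_POINTER):
 */
#if 0
#include <stdio.h>

static const char *types[] = { "u16", "u64", "u32", "natural width" };

static void decode(unsigned long field)
{
	int type = (field & 1) ? 2	/* *_HIGH halves read as u32 */
			       : (int)((field >> 13) & 3);

	printf("0x%04lx: %s%s\n", field, types[type],
	       ((field >> 10) & 3) == 1 ? " (read-only)" : "");
}

int main(void)
{
	decode(0x4402);		/* VM_EXIT_REASON: u32, read-only */
	decode(0x681e);		/* GUEST_RIP: natural width */
	decode(0x2800);		/* VMCS_LINK_POINTER: u64 */
	return 0;
}
#endif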

/*
 * Read a vmcs12 field. Since these can have varying lengths and we return
 * one type, we chose the biggest type (u64) and zero-extend the return value
 * to that size. Note that the caller, handle_vmread, might need to use only
 * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
 * 64-bit fields are to be returned).
 */
static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
					unsigned long field, u64 *ret)
{
	short offset = vmcs_field_to_offset(field);
	char *p;

	if (offset < 0)
		return 0;

	p = ((char *)(get_vmcs12(vcpu))) + offset;

	switch (vmcs_field_type(field)) {
	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
		*ret = *((natural_width *)p);
		return 1;
	case VMCS_FIELD_TYPE_U16:
		*ret = *((u16 *)p);
		return 1;
	case VMCS_FIELD_TYPE_U32:
		*ret = *((u32 *)p);
		return 1;
	case VMCS_FIELD_TYPE_U64:
		*ret = *((u64 *)p);
		return 1;
	default:
		return 0; /* can never happen. */
	}
}
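
/*
 * Illustration, not part of the build: the same "read at an offset,
 * zero-extend to u64" pattern in userspace, using memcpy instead of the
 * typed dereferences above. The struct and its fields are invented for
 * the demo and assume a little-endian host.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_vmcs {
	uint16_t guest_cs;
	uint32_t exit_reason;
	uint64_t guest_rip;
};

static uint64_t read_any(const void *base, size_t offset, int width)
{
	uint64_t ret = 0;

	/* Little endian: the low-order bytes land in the low half. */
	memcpy(&ret, (const char *)base + offset, width);
	return ret;
}

int main(void)
{
	struct demo_vmcs v = { 0x10, 30, 0xfff0 };

	printf("cs=%#llx reason=%llu rip=%#llx\n",
	       (unsigned long long)read_any(&v, offsetof(struct demo_vmcs, guest_cs), 2),
	       (unsigned long long)read_any(&v, offsetof(struct demo_vmcs, exit_reason), 4),
	       (unsigned long long)read_any(&v, offsetof(struct demo_vmcs, guest_rip), 8));
	return 0;
}
#endif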

/*
 * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
 * used before) all generate the same failure when it is missing.
 */
static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	if (vmx->nested.current_vmptr == -1ull) {
		nested_vmx_failInvalid(vcpu);
		skip_emulated_instruction(vcpu);
		return 0;
	}
	return 1;
}

static int handle_vmread(struct kvm_vcpu *vcpu)
{
	unsigned long field;
	u64 field_value;
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gva_t gva = 0;

	if (!nested_vmx_check_permission(vcpu) ||
	    !nested_vmx_check_vmcs12(vcpu))
		return 1;

	/* Decode instruction info and find the field to read */
	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
	/* Read the field, zero-extended to a u64 field_value */
	if (!vmcs12_read_any(vcpu, field, &field_value)) {
		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}
	/*
	 * Now copy part of this value to register or memory, as requested.
	 * Note that the number of bits actually copied is 32 or 64 depending
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (vmx_instruction_info & (1u << 10)) {
		kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
			field_value);
	} else {
		if (get_vmx_mem_address(vcpu, exit_qualification,
				vmx_instruction_info, &gva))
			return 1;
		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
	}

	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	unsigned long field;
	gva_t gva;
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	char *p;
	short offset;
	/*
	 * The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (field_value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 field_value = 0;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu) ||
	    !nested_vmx_check_vmcs12(vcpu))
		return 1;

	if (vmx_instruction_info & (1u << 10))
		field_value = kvm_register_read(vcpu,
			(((vmx_instruction_info) >> 3) & 0xf));
	else {
		if (get_vmx_mem_address(vcpu, exit_qualification,
				vmx_instruction_info, &gva))
			return 1;
		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
			   &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
			kvm_inject_page_fault(vcpu, &e);
			return 1;
		}
	}

	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
	if (vmcs_field_readonly(field)) {
		nested_vmx_failValid(vcpu,
			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	offset = vmcs_field_to_offset(field);
	if (offset < 0) {
		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}
	p = ((char *) get_vmcs12(vcpu)) + offset;

	switch (vmcs_field_type(field)) {
	case VMCS_FIELD_TYPE_U16:
		*(u16 *)p = field_value;
		break;
	case VMCS_FIELD_TYPE_U32:
		*(u32 *)p = field_value;
		break;
	case VMCS_FIELD_TYPE_U64:
		*(u64 *)p = field_value;
		break;
	case VMCS_FIELD_TYPE_NATURAL_WIDTH:
		*(natural_width *)p = field_value;
		break;
	default:
		nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gva_t gva;
	gpa_t vmptr;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
		return 1;

	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
				sizeof(vmptr), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
		skip_emulated_instruction(vcpu);
		return 1;
	}

	if (vmx->nested.current_vmptr != vmptr) {
		struct vmcs12 *new_vmcs12;
		struct page *page;
		page = nested_get_page(vcpu, vmptr);
		if (page == NULL) {
			nested_vmx_failInvalid(vcpu);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		new_vmcs12 = kmap(page);
		if (new_vmcs12->revision_id != VMCS12_REVISION) {
			kunmap(page);
			nested_release_page_clean(page);
			nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
			skip_emulated_instruction(vcpu);
			return 1;
		}
		if (vmx->nested.current_vmptr != -1ull) {
			kunmap(vmx->nested.current_vmcs12_page);
			nested_release_page(vmx->nested.current_vmcs12_page);
		}

		vmx->nested.current_vmptr = vmptr;
		vmx->nested.current_vmcs12 = new_vmcs12;
		vmx->nested.current_vmcs12_page = page;
	}

	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gva_t vmcs_gva;
	struct x86_exception e;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qualification,
			vmx_instruction_info, &vmcs_gva))
		return 1;
	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
				 sizeof(u64), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	nested_vmx_succeed(vcpu);
	skip_emulated_instruction(vcpu);
	return 1;
}

/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
	[EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = handle_cpuid,
	[EXIT_REASON_MSR_READ]                = handle_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = handle_halt,
	[EXIT_REASON_INVD]                    = handle_invd,
	[EXIT_REASON_INVLPG]                  = handle_invlpg,
	[EXIT_REASON_VMCALL]                  = handle_vmcall,
	[EXIT_REASON_VMCLEAR]                 = handle_vmclear,
	[EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
	[EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
	[EXIT_REASON_VMPTRST]                 = handle_vmptrst,
	[EXIT_REASON_VMREAD]                  = handle_vmread,
	[EXIT_REASON_VMRESUME]                = handle_vmresume,
	[EXIT_REASON_VMWRITE]                 = handle_vmwrite,
	[EXIT_REASON_VMOFF]                   = handle_vmoff,
	[EXIT_REASON_VMON]                    = handle_vmon,
	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
	[EXIT_REASON_XSETBV]                  = handle_xsetbv,
	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
	[EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);

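/*
 * Illustrative sketch (added; not part of the original table): a minimal
 * handler following the convention documented above. The handler name is
 * hypothetical and it is referenced nowhere, hence __maybe_unused; the
 * vcpu->run fields are the same ones vmx_handle_exit() uses for unknown
 * exits.
 */
static int __maybe_unused handle_example_unsupported(struct kvm_vcpu *vcpu)
{
	/* Punt to userspace: record why, then return 0 ("not handled"). */
	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
	vcpu->run->hw.hardware_exit_reason = vmcs_read32(VM_EXIT_REASON);
	return 0;
}
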
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
	*info1 = vmcs_readl(EXIT_QUALIFICATION);
	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int vmx_handle_exit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason = vmx->exit_reason;
	u32 vectoring_info = vmx->idt_vectoring_info;

	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);

	/* If guest state is invalid, start emulating */
	if (vmx->emulation_required && emulate_invalid_guest_state)
		return handle_invalid_guest_state(vcpu);

	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= exit_reason;
		return 0;
	}

	if (unlikely(vmx->fail)) {
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
			= vmcs_read32(VM_INSTRUCTION_ERROR);
		return 0;
	}

	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
			exit_reason != EXIT_REASON_EPT_VIOLATION &&
			exit_reason != EXIT_REASON_TASK_SWITCH))
		printk(KERN_WARNING "%s: unexpected, valid vectoring info "
		       "(0x%x) and exit reason is 0x%x\n",
		       __func__, vectoring_info, exit_reason);

	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
		if (vmx_interrupt_allowed(vcpu)) {
			vmx->soft_vnmi_blocked = 0;
		} else if (vmx->vnmi_blocked_time > 1000000000LL &&
			   vcpu->arch.nmi_pending) {
			/*
			 * This CPU doesn't support us in finding the end of an
			 * NMI-blocked window if the guest runs with IRQs
			 * disabled. So we pull the trigger after 1 s of
			 * futile waiting, but inform the user about this.
			 */
			printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
			       "state on VCPU %d after 1 s timeout\n",
			       __func__, vcpu->vcpu_id);
			vmx->soft_vnmi_blocked = 0;
		}
	}

	if (exit_reason < kvm_vmx_max_exit_handlers
	    && kvm_vmx_exit_handlers[exit_reason])
		return kvm_vmx_exit_handlers[exit_reason](vcpu);
	else {
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

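/*
 * Note (added): update_cr8_intercept() programs the TPR threshold. If no
 * interrupt is pending (irr == -1), or the pending one is already
 * deliverable (tpr < irr), no threshold exit is needed and 0 is written;
 * otherwise the threshold is set to irr, so the guest exits as soon as it
 * lowers its task priority below the pending interrupt's priority.
 */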
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	if (irr == -1 || tpr < irr) {
		vmcs_write32(TPR_THRESHOLD, 0);
		return;
	}

	vmcs_write32(TPR_THRESHOLD, irr);
}

static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
{
	u32 exit_intr_info;

	if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
	      || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
		return;

	vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	exit_intr_info = vmx->exit_intr_info;

	/* Handle machine checks before interrupts are enabled */
	if (is_machine_check(exit_intr_info))
		kvm_machine_check();

	/* We need to handle NMIs before interrupts are enabled */
	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
		kvm_before_handle_nmi(&vmx->vcpu);
		asm("int $2");
		kvm_after_handle_nmi(&vmx->vcpu);
	}
}

static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
	u32 exit_intr_info;
	bool unblock_nmi;
	u8 vector;
	bool idtv_info_valid;

	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	if (cpu_has_virtual_nmis()) {
		if (vmx->nmi_known_unmasked)
			return;
		/*
		 * Can't use vmx->exit_intr_info since we're not sure what
		 * the exit reason is.
		 */
		exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
		 * a guest IRET fault.
		 * SDM 3: 23.2.2 (September 2008)
		 * Bit 12 is undefined in any of the following cases:
		 *  If the VM exit sets the valid bit in the IDT-vectoring
		 *   information field.
		 *  If the VM exit is due to a double fault.
		 */
		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
		    vector != DF_VECTOR && !idtv_info_valid)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmx->nmi_known_unmasked =
				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
				  & GUEST_INTR_STATE_NMI);
	} else if (unlikely(vmx->soft_vnmi_blocked))
		vmx->vnmi_blocked_time +=
			ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
}

static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
				      u32 idt_vectoring_info,
				      int instr_len_field,
				      int error_code_field)
{
	u8 vector;
	int type;
	bool idtv_info_valid;

	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	vmx->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&vmx->vcpu);
	kvm_clear_interrupt_queue(&vmx->vcpu);

	if (!idtv_info_valid)
		return;

	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);

	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;

	switch (type) {
	case INTR_TYPE_NMI_INTR:
		vmx->vcpu.arch.nmi_injected = true;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Clear bit "block by NMI" before VM entry if an NMI
		 * delivery faulted.
		 */
		vmx_set_nmi_mask(&vmx->vcpu, false);
		break;
	case INTR_TYPE_SOFT_EXCEPTION:
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(instr_len_field);
		/* fall through */
	case INTR_TYPE_HARD_EXCEPTION:
		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
			u32 err = vmcs_read32(error_code_field);
			kvm_queue_exception_e(&vmx->vcpu, vector, err);
		} else
			kvm_queue_exception(&vmx->vcpu, vector);
		break;
	case INTR_TYPE_SOFT_INTR:
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(instr_len_field);
		/* fall through */
	case INTR_TYPE_EXT_INTR:
		kvm_queue_interrupt(&vmx->vcpu, vector,
				    type == INTR_TYPE_SOFT_INTR);
		break;
	default:
		break;
	}
}

static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
	__vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
				  VM_EXIT_INSTRUCTION_LEN,
				  IDT_VECTORING_ERROR_CODE);
}

static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
	__vmx_complete_interrupts(to_vmx(vcpu),
				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
				  VM_ENTRY_INSTRUCTION_LEN,
				  VM_ENTRY_EXCEPTION_ERROR_CODE);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}

#ifdef CONFIG_X86_64
#define R "r"
#define Q "q"
#else
#define R "e"
#define Q "l"
#endif
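
/*
 * Note (added): in the asm template below, R supplies the register-name
 * prefix ("%rax" vs. "%eax") and Q the operand-size suffix ("popq" vs.
 * "popl"), so a single template assembles for both 64-bit and 32-bit
 * builds.
 */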

static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Record the guest's net vcpu time for enforced NMI injections. */
	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
		vmx->entry_time = ktime_get();

	/* Don't enter VMX if guest state is invalid, let the exit handler
	   start emulation until we arrive back to a valid state */
	if (vmx->emulation_required && emulate_invalid_guest_state)
		return;

	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

	/* When single-stepping over STI and MOV SS, we must clear the
	 * corresponding interruptibility bits in the guest state. Otherwise
	 * vmentry fails as it then expects bit 14 (BS) in pending debug
	 * exceptions being set, but that's not correct for the guest debugging
	 * case. */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vmx_set_interrupt_shadow(vcpu, 0);

	vmx->__launched = vmx->loaded_vmcs->launched;
	asm(
		/* Store host registers */
		"push %%"R"dx; push %%"R"bp;"
		"push %%"R"cx \n\t" /* placeholder for guest rcx */
		"push %%"R"cx \n\t"
		"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
		"je 1f \n\t"
		"mov %%"R"sp, %c[host_rsp](%0) \n\t"
		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
		"1: \n\t"
		/* Reload cr2 if changed */
		"mov %c[cr2](%0), %%"R"ax \n\t"
		"mov %%cr2, %%"R"dx \n\t"
		"cmp %%"R"ax, %%"R"dx \n\t"
		"je 2f \n\t"
		"mov %%"R"ax, %%cr2 \n\t"
		"2: \n\t"
		/* Check if vmlaunch or vmresume is needed */
		"cmpl $0, %c[launched](%0) \n\t"
		/* Load guest registers.  Don't clobber flags. */
		"mov %c[rax](%0), %%"R"ax \n\t"
		"mov %c[rbx](%0), %%"R"bx \n\t"
		"mov %c[rdx](%0), %%"R"dx \n\t"
		"mov %c[rsi](%0), %%"R"si \n\t"
		"mov %c[rdi](%0), %%"R"di \n\t"
		"mov %c[rbp](%0), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%0),  %%r8  \n\t"
		"mov %c[r9](%0),  %%r9  \n\t"
		"mov %c[r10](%0), %%r10 \n\t"
		"mov %c[r11](%0), %%r11 \n\t"
		"mov %c[r12](%0), %%r12 \n\t"
		"mov %c[r13](%0), %%r13 \n\t"
		"mov %c[r14](%0), %%r14 \n\t"
		"mov %c[r15](%0), %%r15 \n\t"
#endif
		"mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */

		/* Enter guest mode */
		"jne .Llaunched \n\t"
		__ex(ASM_VMX_VMLAUNCH) "\n\t"
		"jmp .Lkvm_vmx_return \n\t"
		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
		".Lkvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
		"mov %0, %c[wordsize](%%"R"sp) \n\t"
		"pop %0 \n\t"
		"mov %%"R"ax, %c[rax](%0) \n\t"
		"mov %%"R"bx, %c[rbx](%0) \n\t"
		"pop"Q" %c[rcx](%0) \n\t"
		"mov %%"R"dx, %c[rdx](%0) \n\t"
		"mov %%"R"si, %c[rsi](%0) \n\t"
		"mov %%"R"di, %c[rdi](%0) \n\t"
		"mov %%"R"bp, %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%0) \n\t"
		"mov %%r9,  %c[r9](%0) \n\t"
		"mov %%r10, %c[r10](%0) \n\t"
		"mov %%r11, %c[r11](%0) \n\t"
		"mov %%r12, %c[r12](%0) \n\t"
		"mov %%r13, %c[r13](%0) \n\t"
		"mov %%r14, %c[r14](%0) \n\t"
		"mov %%r15, %c[r15](%0) \n\t"
#endif
		"mov %%cr2, %%"R"ax   \n\t"
		"mov %%"R"ax, %c[cr2](%0) \n\t"

		"pop  %%"R"bp; pop  %%"R"dx \n\t"
		"setbe %c[fail](%0) \n\t"
	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
		[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
		[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
		[wordsize]"i"(sizeof(ulong))
	      : "cc", "memory"
		, R"ax", R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
	      );

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_CPL)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR3));
	vcpu->arch.regs_dirty = 0;

	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
	vmx->loaded_vmcs->launched = 1;

	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);

	vmx_complete_atomic_exit(vmx);
	vmx_recover_nmi_blocking(vmx);
	vmx_complete_interrupts(vmx);
}

#undef R
#undef Q

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	free_vpid(vmx);
	free_nested(vmx);
	free_loaded_vmcs(vmx->loaded_vmcs);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vmx);
}

static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	int cpu;

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	allocate_vpid(vmx);

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!vmx->guest_msrs) {
		goto uninit_vcpu;
	}

	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx->loaded_vmcs->vmcs = alloc_vmcs();
	if (!vmx->loaded_vmcs->vmcs)
		goto free_msrs;
	if (!vmm_exclusive)
		kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
	loaded_vmcs_init(vmx->loaded_vmcs);
	if (!vmm_exclusive)
		kvm_cpu_vmxoff();

	cpu = get_cpu();
	vmx_vcpu_load(&vmx->vcpu, cpu);
	vmx->vcpu.cpu = cpu;
	err = vmx_vcpu_setup(vmx);
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();
	if (err)
		goto free_vmcs;
	if (vm_need_virtualize_apic_accesses(kvm))
		err = alloc_apic_access_page(kvm);
	if (err)
		goto free_vmcs;

	if (enable_ept) {
		if (!kvm->arch.ept_identity_map_addr)
			kvm->arch.ept_identity_map_addr =
				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
		err = -ENOMEM;
		if (alloc_identity_pagetable(kvm) != 0)
			goto free_vmcs;
		if (!init_rmode_identity_map(kvm))
			goto free_vmcs;
	}

	vmx->nested.current_vmptr = -1ull;
	vmx->nested.current_vmcs12 = NULL;

	return &vmx->vcpu;

free_vmcs:
	free_vmcs(vmx->loaded_vmcs->vmcs);
free_msrs:
	kfree(vmx->guest_msrs);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	free_vpid(vmx);
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);
}

static void __init vmx_check_processor_compat(void *rtn)
{
	struct vmcs_config vmcs_conf;

	*(int *)rtn = 0;
	if (setup_vmcs_config(&vmcs_conf) < 0)
		*(int *)rtn = -EIO;
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
		       smp_processor_id());
		*(int *)rtn = -EIO;
	}
}

static int get_ept_level(void)
{
	return VMX_EPT_DEFAULT_GAW + 1;
}

static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	u64 ret;

	/* For VT-d and EPT combination
	 * 1. MMIO: always map as UC
	 * 2. EPT with VT-d:
	 *   a. VT-d without snooping control feature: can't guarantee the
	 *	result, try to trust guest.
	 *   b. VT-d with snooping control feature: snooping control feature of
	 *	VT-d engine can guarantee the cache correctness. Just set it
	 *	to WB to keep consistent with host. So the same as item 3.
	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
	 *    consistent with host MTRR
	 */
	if (is_mmio)
		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
	else if (vcpu->kvm->arch.iommu_domain &&
		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
		      VMX_EPT_MT_EPTE_SHIFT;
	else
		ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
			| VMX_EPT_IPAT_BIT;

	return ret;
}
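
/*
 * Summary (added): the decision above collapses to
 *	MMIO					-> UC
 *	assigned device, no snoop control	-> guest's own memory type
 *	everything else				-> WB, with IPAT set
 * with the chosen type shifted into the EPT PTE's memory-type bits.
 */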

#define _ER(x) { EXIT_REASON_##x, #x }

static const struct trace_print_flags vmx_exit_reasons_str[] = {
	_ER(EXCEPTION_NMI),
	_ER(EXTERNAL_INTERRUPT),
	_ER(TRIPLE_FAULT),
	_ER(PENDING_INTERRUPT),
	_ER(NMI_WINDOW),
	_ER(TASK_SWITCH),
	_ER(CPUID),
	_ER(HLT),
	_ER(INVLPG),
	_ER(RDPMC),
	_ER(RDTSC),
	_ER(VMCALL),
	_ER(VMCLEAR),
	_ER(VMLAUNCH),
	_ER(VMPTRLD),
	_ER(VMPTRST),
	_ER(VMREAD),
	_ER(VMRESUME),
	_ER(VMWRITE),
	_ER(VMOFF),
	_ER(VMON),
	_ER(CR_ACCESS),
	_ER(DR_ACCESS),
	_ER(IO_INSTRUCTION),
	_ER(MSR_READ),
	_ER(MSR_WRITE),
	_ER(MWAIT_INSTRUCTION),
	_ER(MONITOR_INSTRUCTION),
	_ER(PAUSE_INSTRUCTION),
	_ER(MCE_DURING_VMENTRY),
	_ER(TPR_BELOW_THRESHOLD),
	_ER(APIC_ACCESS),
	_ER(EPT_VIOLATION),
	_ER(EPT_MISCONFIG),
	_ER(WBINVD),
	{ -1, NULL }
};

#undef _ER

static int vmx_get_lpage_level(void)
{
	if (enable_ept && !cpu_has_vmx_ept_1g_page())
		return PT_DIRECTORY_LEVEL;
	else
		/* For shadow and EPT supported 1GB page */
		return PT_PDPE_LEVEL;
}

static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exec_control;

	vmx->rdtscp_enabled = false;
	if (vmx_rdtscp_supported()) {
		exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
		if (exec_control & SECONDARY_EXEC_RDTSCP) {
			best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
			if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
				vmx->rdtscp_enabled = true;
			else {
				exec_control &= ~SECONDARY_EXEC_RDTSCP;
				vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
						exec_control);
			}
		}
	}
}

static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
}

/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that will both be appropriate to L1's requests, and our
 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has additional necessary side-effects, like setting various
 * vcpu->arch fields.
 */
static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exec_control;

	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
	vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
	vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
	vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
	vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
	vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
	vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
	vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
	vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
	vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
	vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
	vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
	vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
	vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
	vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
	vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
	vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
	vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
	vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
	vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
	vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
	vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
	vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
	vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
	vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
	vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
	vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
	vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
	vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
	vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
	vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);

	vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		vmcs12->vm_entry_intr_info_field);
	vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
		vmcs12->vm_entry_exception_error_code);
	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
		vmcs12->vm_entry_instruction_len);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
		vmcs12->guest_interruptibility_info);
	vmcs_write32(GUEST_ACTIVITY_STATE, vmcs12->guest_activity_state);
	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
	vmcs_writel(GUEST_DR7, vmcs12->guest_dr7);
	vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags);
	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
		vmcs12->guest_pending_dbg_exceptions);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
		(vmcs_config.pin_based_exec_ctrl |
		 vmcs12->pin_based_vm_exec_control));

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 *
	 * A problem with this approach (when !enable_ept) is that L1 may be
	 * injected with more page faults than it asked for. This could have
	 * caused problems, but in practice existing hypervisors don't care.
	 * To fix this, we will need to emulate the PFEC checking (on the L1
	 * page tables), using walk_addr(), when injecting PFs to L1.
	 */
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
		enable_ept ? vmcs12->page_fault_error_code_match : 0);

	if (cpu_has_secondary_exec_ctrls()) {
		u32 exec_control = vmx_secondary_exec_control(vmx);
		if (!vmx->rdtscp_enabled)
			exec_control &= ~SECONDARY_EXEC_RDTSCP;
		/* Take the following fields only from vmcs12 */
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
		if (nested_cpu_has(vmcs12,
				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
			exec_control |= vmcs12->secondary_vm_exec_control;

		if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
			/*
			 * Translate L1 physical address to host physical
			 * address for vmcs02. Keep the page pinned, so this
			 * physical address remains valid. We keep a reference
			 * to it so we can release it later.
			 */
			if (vmx->nested.apic_access_page) /* shouldn't happen */
				nested_release_page(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page =
				nested_get_page(vcpu, vmcs12->apic_access_addr);
			/*
			 * If translation failed, no matter: This feature asks
			 * to exit when accessing the given address, and if it
			 * can never be accessed, this feature won't do
			 * anything anyway.
			 */
			if (!vmx->nested.apic_access_page)
				exec_control &=
					~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
			else
				vmcs_write64(APIC_ACCESS_ADDR,
					page_to_phys(vmx->nested.apic_access_page));
		}

		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
	}


	/*
	 * Set host-state according to L0's settings (vmcs12 is irrelevant here)
	 * Some constant fields are set here by vmx_set_constant_host_state().
	 * Other fields are different per CPU, and will be set later when
	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
	 */
	vmx_set_constant_host_state();

	/*
	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
	 * entry, but only if the current (host) sp changed from the value
	 * we wrote last (vmx->host_rsp). This cache is no longer relevant
	 * if we switch vmcs, and rather than hold a separate cache per vmcs,
	 * here we just force the write to happen on entry.
	 */
	vmx->host_rsp = 0;

	exec_control = vmx_exec_control(vmx); /* L0's desires */
	exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
	exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
	exec_control &= ~CPU_BASED_TPR_SHADOW;
	exec_control |= vmcs12->cpu_based_vm_exec_control;
	/*
	 * Merging of IO and MSR bitmaps not currently supported.
	 * Rather, exit every time.
	 */
	exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
	exec_control |= CPU_BASED_UNCOND_IO_EXITING;

	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	/* Note: IA32_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */
	vmcs_write32(VM_EXIT_CONTROLS,
		vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl);
	vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls |
		(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
	else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);


	set_cr4_guest_host_mask(vmx);

	vmcs_write64(TSC_OFFSET,
		vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);

	if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
		vmx_flush_tlb(vcpu);
	}

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->guest_ia32_efer;
	if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
	 * TS bit (for lazy fpu) and bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
	 * have more bits set than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	/* shadow page tables on either EPT or shadow page tables */
	kvm_set_cr3(vcpu, vmcs12->guest_cr3);
	kvm_mmu_reset_context(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
}
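
/*
 * Note (added): prepare_vmcs02() is invoked from nested_vmx_run() below,
 * after all the prerequisite checks on vmcs12 have passed and vmcs02 has
 * been made the active VMCS.
 */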

/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;
	struct loaded_vmcs *vmcs02;

	if (!nested_vmx_check_permission(vcpu) ||
	    !nested_vmx_check_vmcs12(vcpu))
		return 1;

	skip_emulated_instruction(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acting appropriately when
	 * they fail: as the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE exit.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (vmcs12->launch_state == launch) {
		nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
		return 1;
	}

	if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
			!IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
		/*TODO: Also verify bits beyond physical address width are 0*/
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
			!IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
		/*TODO: Also verify bits beyond physical address width are 0*/
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (vmcs12->vm_entry_msr_load_count > 0 ||
	    vmcs12->vm_exit_msr_load_count > 0 ||
	    vmcs12->vm_exit_msr_store_count > 0) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			  "%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__);
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
	      nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->secondary_vm_exec_control,
	      nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
	    !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
	      nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
	    !vmx_control_verify(vmcs12->vm_exit_controls,
	      nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
	    !vmx_control_verify(vmcs12->vm_entry_controls,
	      nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
	{
		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
		nested_vmx_failValid(vcpu,
			VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
		return 1;
	}

	if (((vmcs12->guest_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
		nested_vmx_entry_failure(vcpu, vmcs12,
			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
		return 1;
	}
	if (vmcs12->vmcs_link_pointer != -1ull) {
		nested_vmx_entry_failure(vcpu, vmcs12,
			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
		return 1;
	}

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */

	vmcs02 = nested_get_current_vmcs02(vmx);
	if (!vmcs02)
		return -ENOMEM;

	enter_guest_mode(vcpu);

	vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);

	cpu = get_cpu();
	vmx->loaded_vmcs = vmcs02;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();

	vmcs12->launch_state = 1;

	prepare_vmcs02(vcpu, vmcs12);

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 1;
}
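
/*
 * Note (added): the get_cpu()/put_cpu() window above pins the vcpu to one
 * physical CPU while the active VMCS is switched from vmcs01 to vmcs02;
 * vmx_vcpu_put()/vmx_vcpu_load() redo the per-CPU setup against the new
 * VMCS. nested_vmx_vmexit() performs the mirror-image switch back to vmcs01.
 */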

/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *     didn't trap the bit, because if L1 did, so would L0).
 *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *     been modified by L2, and L1 knows it. So just leave the old value of
 *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *     isn't relevant, because if L0 traps this bit it can set it to anything.
 *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *     changed these bits, and therefore they need to be updated, but L0
 *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}
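
/*
 * Illustrative sketch (added; not part of the original file): the same
 * three-way merge with the live VMCS reads replaced by plain parameters.
 * "owned" is the set of bits L0 lets the guest write directly
 * (cr0_guest_owned_bits); "l1_mask" is vmcs12->cr0_guest_host_mask. All
 * names here are hypothetical, for exposition only.
 */
static inline unsigned long
sketch_merge_guest_cr0(unsigned long vmcs02_guest_cr0,
		       unsigned long vmcs02_cr0_read_shadow,
		       unsigned long vmcs12_guest_cr0,
		       unsigned long owned, unsigned long l1_mask)
{
	return (vmcs02_guest_cr0 & owned) |			/* case 1 */
	       (vmcs12_guest_cr0 & l1_mask) |			/* case 2 */
	       (vmcs02_cr0_read_shadow & ~(l1_mask | owned));	/* case 3 */
}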

static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	/* update guest state fields: */
	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
	vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);

	vmcs12->guest_activity_state = vmcs_read32(GUEST_ACTIVITY_STATE);
	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);

	/* TODO: These cannot have changed unless we have MSR bitmaps and
	 * the relevant bit asks not to trap the change */
	vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
		vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);

	/* update exit information fields: */

	vmcs12->vm_exit_reason = vmcs_read32(VM_EXIT_REASON);
	vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	vmcs12->vm_exit_intr_error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	vmcs12->idt_vectoring_info_field =
		vmcs_read32(IDT_VECTORING_INFO_FIELD);
	vmcs12->idt_vectoring_error_code =
		vmcs_read32(IDT_VECTORING_ERROR_CODE);
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	/* clear vm-entry fields which are to be cleared on exit */
	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
}

/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
6370void load_vmcs12_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
6371{
6372 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
6373 vcpu->arch.efer = vmcs12->host_ia32_efer;
6374 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
6375 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
6376 else
6377 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
6378 vmx_set_efer(vcpu, vcpu->arch.efer);
6379
6380 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
6381 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because it depends on the current state of
	 * fpu_active (which may have changed).
	 * Note also that vmx_set_cr0 refers to the EFER value set above.
	 */
	kvm_set_cr0(vcpu, vmcs12->host_cr0);
	/*
	 * If we did fpu_activate()/fpu_deactivate() during L2's run, we need
	 * to apply the same changes to L1's vmcs. We just set cr0 correctly,
	 * but we also need to update cr0_guest_host_mask and the exception
	 * bitmap.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	/*
	 * Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it), so there is no reason to call
	 * set_cr4_guest_host_mask().
	 */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	kvm_set_cr4(vcpu, vmcs12->host_cr4);

	/* Switch back to L1's page tables; works with both EPT and shadow paging */
	kvm_set_cr3(vcpu, vmcs12->host_cr3);
	kvm_mmu_reset_context(vcpu);
	if (enable_vpid) {
		/*
		 * Trivially support vpid by letting L2s share their parent
		 * L1's vpid. TODO: move to a more elaborate solution, giving
		 * each L2 its own vpid and exposing the vpid feature to L1.
		 */
		vmx_flush_tlb(vcpu);
	}

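	/*
	 * Emulate the host-state loading a real VM exit performs: the
	 * SYSENTER MSRs, descriptor-table bases, segment bases and
	 * selectors all come from the vmcs12 host-state area.
	 */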
	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
	vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
	vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
	vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
	vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
	vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
	vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
	vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
	vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
	vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
			vmcs12->host_ia32_perf_global_ctrl);
}

/*
 * Emulate an exit from the nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 were its real guest. Must only be called when in L2 (is_guest_mode()).
 */
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	leave_guest_mode(vcpu);
	prepare_vmcs12(vcpu, vmcs12);

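	/*
	 * Switch the active VMCS from vmcs02 back to L1's vmcs01. The
	 * vcpu_put/vcpu_load pair reruns the per-cpu VMCS setup for the
	 * newly selected loaded_vmcs, which is why the whole sequence is
	 * pinned to one cpu with get_cpu()/put_cpu().
	 */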
	cpu = get_cpu();
	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx_vcpu_put(vcpu);
	vmx_vcpu_load(vcpu, cpu);
	vcpu->cpu = cpu;
	put_cpu();

	/* if no vmcs02 cache requested, remove the one we used */
	if (VMCS02_POOL_SIZE == 0)
		nested_free_vmcs02(vmx, vmx->nested.current_vmptr);

	load_vmcs12_host_state(vcpu, vmcs12);

	/* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */
	vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);

	/* This is needed for the same reason as it was needed in prepare_vmcs02 */
	vmx->host_rsp = 0;

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		nested_release_page(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}

	/*
	 * Exiting from L2 to L1, we're now back to L1 which thinks it just
	 * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
	 * success or failure flag accordingly.
	 */
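	/*
	 * (In VMX terms: VMsucceed clears all of the arithmetic flags, while
	 * VMfailValid sets ZF and deposits an error number in the
	 * VM-instruction error field; nested_vmx_succeed() and
	 * nested_vmx_failValid() emulate exactly that for L1.)
	 */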
	if (unlikely(vmx->fail)) {
		vmx->fail = 0;
		nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
	} else
		nested_vmx_succeed(vcpu);
}

/*
 * L1's failure to enter L2 is a subset of a normal exit, as explained in
 * 23.7 "VM-entry failures during or after loading guest state" (this also
 * lists the acceptable exit-reason and exit-qualification parameters).
 * It must only be called before L2 has actually run, and while vmcs01 is
 * current (it neither calls leave_guest_mode() nor switches VMCSs).
 */
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification)
{
	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = qualification;
	nested_vmx_succeed(vcpu);
}
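
/*
 * A minimal illustrative sketch (not a call site in this excerpt): a caller
 * that rejects a vmcs12 before L2 ever runs could report the failure to L1
 * with something like
 *
 *	nested_vmx_entry_failure(vcpu, vmcs12, EXIT_REASON_INVALID_STATE, 0);
 *
 * where the reason and qualification values shown are placeholders; L1 then
 * observes a VM entry that failed with that exit reason and qualification.
 */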

static int vmx_check_intercept(struct kvm_vcpu *vcpu,
			       struct x86_instruction_info *info,
			       enum x86_intercept_stage stage)
{
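	/*
	 * No VMX-specific instruction-intercept checks are implemented yet,
	 * so always let the emulation continue.
	 */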
	return X86EMUL_CONTINUE;
}

static struct kvm_x86_ops vmx_x86_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.set_guest_debug = set_guest_debug,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
	.decache_cr3 = vmx_decache_cr3,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.set_dr7 = vmx_set_dr7,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,
	.fpu_activate = vmx_fpu_activate,
	.fpu_deactivate = vmx_fpu_deactivate,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.cancel_injection = vmx_cancel_injection,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,

	.set_tss_addr = vmx_set_tss_addr,
	.get_tdp_level = get_ept_level,
	.get_mt_mask = vmx_get_mt_mask,

	.get_exit_info = vmx_get_exit_info,
	.exit_reasons_str = vmx_exit_reasons_str,

	.get_lpage_level = vmx_get_lpage_level,

	.cpuid_update = vmx_cpuid_update,

	.rdtscp_supported = vmx_rdtscp_supported,

	.set_supported_cpuid = vmx_set_supported_cpuid,

	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

	.set_tsc_khz = vmx_set_tsc_khz,
	.write_tsc_offset = vmx_write_tsc_offset,
	.adjust_tsc_offset = vmx_adjust_tsc_offset,
	.compute_tsc_offset = vmx_compute_tsc_offset,

	.set_tdp_cr3 = vmx_set_cr3,

	.check_intercept = vmx_check_intercept,
};

static int __init vmx_init(void)
{
	int r, i;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < NR_VMX_MSR; ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;
	}

	vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_legacy) {
		r = -ENOMEM;
		goto out1;
	}

	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_longmode) {
		r = -ENOMEM;
		goto out2;
	}

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
	clear_bit(0x80, vmx_io_bitmap_a);

	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);

	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
		     __alignof__(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out3;

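	/*
	 * These MSRs are hot paths for guests and need no interception:
	 * FS/GS base and the SYSENTER MSRs live in VMCS guest-state fields,
	 * and KERNEL_GS_BASE (long mode only, hence the "true") goes through
	 * the shared-MSR machinery, so trapping them would only add overhead.
	 */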
	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);

	if (enable_ept) {
		bypass_guest_pf = 0;
		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
				VMX_EPT_EXECUTABLE_MASK);
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (bypass_guest_pf)
		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);

	return 0;

out3:
	free_page((unsigned long)vmx_msr_bitmap_longmode);
out2:
	free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
	free_page((unsigned long)vmx_io_bitmap_b);
out:
	free_page((unsigned long)vmx_io_bitmap_a);
	return r;
}

static void __exit vmx_exit(void)
{
	free_page((unsigned long)vmx_msr_bitmap_legacy);
	free_page((unsigned long)vmx_msr_bitmap_longmode);
	free_page((unsigned long)vmx_io_bitmap_b);
	free_page((unsigned long)vmx_io_bitmap_a);

	kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)