/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>

#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
                                  0xFFFFFF0000000000ULL)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES 3
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)

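/*
 * For illustration (assuming the usual x86 case of PAGE_SHIFT == 12,
 * i.e. 4 KiB base pages), the three supported levels work out to:
 *        KVM_HPAGE_SIZE(1) == 4 KiB  (normal pages)
 *        KVM_HPAGE_SIZE(2) == 2 MiB  (large pages)
 *        KVM_HPAGE_SIZE(3) == 1 GiB  (1G pages)
 */
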
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum kvm_reg {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        VCPU_REGS_RIP,
        NR_VCPU_REGS
};

enum kvm_reg_ex {
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
};

enum {
        VCPU_SREG_ES,
        VCPU_SREG_CS,
        VCPU_SREG_SS,
        VCPU_SREG_DS,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS 4

#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_FIXED_1 0xffff0ff0
#define DR6_VOLATILE 0x0000e00f

#define DR7_BP_EN_MASK 0x000000ff
#define DR7_GE (1 << 9)
#define DR7_GD (1 << 13)
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff23ff

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

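/*
 * A minimal sketch (an illustration only; the real helpers live in mmu.c)
 * of how such a cache is consumed: objects are popped off the end during
 * fault handling, and the cache is topped up beforehand so the fault path
 * itself never has to allocate:
 *
 *        if (mc->nobjs)
 *                obj = mc->objects[--mc->nobjs];
 */
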
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - direct mapping of virtual to physical at gfn,
 *            used for real mode and two-dimensional paging
 * bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned level:4;
                unsigned cr4_pae:1;
                unsigned quadrant:2;
                unsigned pad_for_nice_hex_output:6;
                unsigned direct:1;
                unsigned access:3;
                unsigned invalid:1;
                unsigned nxe:1;
                unsigned cr0_wp:1;
        };
};

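/*
 * Because the bitfields above alias 'word', a complete role can be hashed
 * and compared as a single integer. A sketch of the kind of test the MMU
 * makes when looking up a shadow page for a gfn:
 *
 *        if (sp->gfn == gfn && sp->role.word == role.word)
 *                return sp;
 */
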
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        /*
         * One bit set per slot which has memory
         * in this shadow page.
         */
        DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        bool multimapped; /* More than one parent_pte? */
        bool unsync;
        int root_count; /* Currently serving as active root */
        unsigned int unsync_children;
        union {
                u64 *parent_pte; /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
        DECLARE_BITMAP(unsync_child_bitmap, 512);
};

struct kvm_pv_mmu_op_buffer {
        void *ptr;
        unsigned len;
        unsigned processed;
        char buf[512] __aligned(sizeof(long));
};

struct kvm_pio_request {
        unsigned long count;
        int in;
        int port;
        int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
        unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
                            u32 *error);
        gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
        void (*prefetch_page)(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *page);
        int (*sync_page)(struct kvm_vcpu *vcpu,
                         struct kvm_mmu_page *sp, bool clear_unsync);
        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
        union kvm_mmu_page_role base_role;
        bool direct_map;

        u64 *pae_root;
        u64 rsvd_bits_mask[2][4];
};

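/*
 * A sketch of how this context is consulted (the real call sites live in
 * mmu.c): a guest page fault is dispatched through the active context's
 * page_fault hook, e.g.
 *
 *        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 */
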
struct kvm_vcpu_arch {
        /*
         * rip and regs accesses must go through
         * kvm_{register,rip}_{read,write} functions.
         */
        unsigned long regs[NR_VCPU_REGS];
        u32 regs_avail;
        u32 regs_dirty;

        unsigned long cr0;
        unsigned long cr0_guest_owned_bits;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
        u32 hflags;
        u64 pdptrs[4]; /* pae */
        u64 efer;
        u64 apic_base;
        struct kvm_lapic *apic; /* kernel irqchip context */
        int32_t apic_arb_prio;
        int mp_state;
        int sipi_vector;
        u64 ia32_misc_enable_msr;
        bool tpr_access_reporting;

        /*
         * Paging state of the vcpu
         *
         * If the vcpu runs in guest mode with two-level paging, this still
         * saves the paging mode of the L1 guest. This context is always used
         * to handle faults.
         */
        struct kvm_mmu mmu;

        /*
         * Pointer to the mmu context currently used for
         * gva_to_gpa translations.
         */
        struct kvm_mmu *walk_mmu;

        /*
         * This struct is filled with the necessary information to propagate a
         * page fault into the guest
         */
        struct {
                u64 address;
                unsigned error_code;
        } fault;

        /* only needed in kvm_pv_mmu_op() path, but it's hot so
         * put it here to avoid allocation */
        struct kvm_pv_mmu_op_buffer mmu_op_buffer;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;
        u64 *last_pte_updated;
        gfn_t last_pte_gfn;

        struct {
                gfn_t gfn; /* presumed gfn during guest pte update */
                pfn_t pfn; /* pfn corresponding to that gfn */
                unsigned long mmu_seq;
        } update_pte;

        struct fpu guest_fpu;
        u64 xcr0;

        gva_t mmio_fault_cr2;
        struct kvm_pio_request pio;
        void *pio_data;

        u8 event_exit_inst_len;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                bool reinject;
                u8 nr;
                u32 error_code;
        } exception;

        struct kvm_queued_interrupt {
                bool pending;
                bool soft;
                u8 nr;
        } interrupt;

        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;

        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
        unsigned int time_offset;
        struct page *time_page;
        u64 last_host_tsc;
        u64 last_guest_tsc;
        u64 last_kernel_ns;

        bool nmi_pending;
        bool nmi_injected;

        struct mtrr_state_type mtrr_state;
        u32 pat;

        int switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
        unsigned long dr6;
        unsigned long dr7;
        unsigned long eff_db[KVM_NR_DB_REGS];

        u64 mcg_cap;
        u64 mcg_status;
        u64 mcg_ctl;
        u64 *mce_banks;

        /* used for guest single stepping over the given code position */
        unsigned long singlestep_rip;

        /* fields used by HYPER-V emulation */
        u64 hv_vapic;

        cpumask_var_t wbinvd_dirty_mask;
};

struct kvm_arch {
        unsigned int n_used_mmu_pages;
        unsigned int n_requested_mmu_pages;
        unsigned int n_max_mmu_pages;
        atomic_t invlpg_counter;
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        int iommu_flags;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
        int vapics_in_nmi_mode;

        unsigned int tss_addr;
        struct page *apic_access_page;

        gpa_t wall_clock;

        struct page *ept_identity_pagetable;
        bool ept_identity_pagetable_done;
        gpa_t ept_identity_map_addr;

        unsigned long irq_sources_bitmap;
        s64 kvmclock_offset;
        spinlock_t tsc_write_lock;
        u64 last_tsc_nsec;
        u64 last_tsc_offset;
        u64 last_tsc_write;

        struct kvm_xen_hvm_config xen_hvm_config;

        /* fields used by HYPER-V emulation */
        u64 hv_guest_os_id;
        u64 hv_hypercall;
};

struct kvm_vm_stat {
        u32 mmu_shadow_zapped;
        u32 mmu_pte_write;
        u32 mmu_pte_updated;
        u32 mmu_pde_zapped;
        u32 mmu_flooded;
        u32 mmu_recycled;
        u32 mmu_cache_miss;
        u32 mmu_unsync;
        u32 remote_tlb_flush;
        u32 lpages;
};

struct kvm_vcpu_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 nmi_window_exits;
        u32 halt_exits;
        u32 halt_wakeup;
        u32 request_irq_exits;
        u32 irq_exits;
        u32 host_state_reload;
        u32 efer_reload;
        u32 fpu_reload;
        u32 insn_emulation;
        u32 insn_emulation_fail;
        u32 hypercalls;
        u32 irq_injections;
        u32 nmi_injections;
};

struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void); /* __init */
        int (*disabled_by_bios)(void); /* __init */
        int (*hardware_enable)(void *dummy);
        void (*hardware_disable)(void *dummy);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void); /* __init */
        void (*hardware_unsetup)(void); /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*vcpu_reset)(struct kvm_vcpu *vcpu);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        void (*set_guest_debug)(struct kvm_vcpu *vcpu,
                                struct kvm_guest_debug *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
        void (*fpu_activate)(struct kvm_vcpu *vcpu);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject);
        int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
        int (*nmi_allowed)(struct kvm_vcpu *vcpu);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        void (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);

        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

        void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

        bool (*has_wbinvd_exit)(void);

        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

        const struct trace_print_flags *exit_reasons_str;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_base_ptes(u64 base_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                        const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
                  gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

enum emulation_result {
        EMULATE_DONE,    /* no further processing */
        EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
        EMULATE_FAIL,    /* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
int emulate_instruction(struct kvm_vcpu *vcpu,
                        unsigned long cr2, u16 error_code, int emulation_type);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);

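/*
 * Sketch of the usual call pattern from an exit handler (an illustration
 * only; the real logic lives in x86.c and the vendor modules). The return
 * value tells the caller whether to resume the guest, bounce an MMIO
 * access to userspace, or give up on emulation:
 *
 *        switch (emulate_instruction(vcpu, cr2, error_code, 0)) {
 *        case EMULATE_DONE:    resume the guest
 *        case EMULATE_DO_MMIO: exit to userspace to satisfy the MMIO
 *        case EMULATE_FAIL:    report an internal error
 *        }
 */
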
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
                    bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes,
                       bool guest_initiated);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

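/*
 * Example (a sketch assuming root_hpa currently points at a shadow page
 * table allocated by the MMU, e.g. a 4-level shadow root):
 *
 *        struct kvm_mmu_page *sp = page_header(vcpu->arch.mmu.root_hpa);
 */
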
static inline u16 kvm_read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
        TASK_SWITCH_CALL = 0,
        TASK_SWITCH_IRET = 1,
        TASK_SWITCH_JMP = 2,
        TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK (1 << 0)
#define HF_HIF_MASK (1 << 1)
#define HF_VINTR_MASK (1 << 2)
#define HF_NMI_MASK (1 << 3)
#define HF_IRET_MASK (1 << 4)

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_handle_fault_on_reboot(void);

#define __kvm_handle_fault_on_reboot(insn) \
        "666: " insn "\n\t" \
        ".pushsection .fixup, \"ax\" \n" \
        "667: \n\t" \
        __ASM_SIZE(push) " $666b \n\t" \
        "jmp kvm_handle_fault_on_reboot \n\t" \
        ".popsection \n\t" \
        ".pushsection __ex_table, \"a\" \n\t" \
        _ASM_PTR " 666b, 667b \n\t" \
        ".popsection"

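/*
 * Typical (sketched) usage: the VMX/SVM code wraps the macro in a short
 * local helper, e.g.
 *
 *        #define __ex(insn) __kvm_handle_fault_on_reboot(insn)
 *        ...
 *        asm volatile(__ex("vmxoff"));
 *
 * so that a fault raised because virtualization was just turned off
 * during reboot is trapped and ignored rather than crashing the machine.
 */
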
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

#endif /* _ASM_X86_KVM_HOST_H */