/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef _ASM_X86_KVM_HOST_H
#define _ASM_X86_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/msr-index.h>

#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
				  0xFFFFFF0000000000ULL)

#define INVALID_PAGE (~(hpa_t)0)
#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define UNMAPPED_GVA (~(gpa_t)0)

/* KVM Hugepage definitions for x86 */
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

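/*
 * Worked example (editor's note, assuming the usual PAGE_SHIFT of 12):
 * for the 2MB level, x == 2, so KVM_HPAGE_GFN_SHIFT(2) == 9,
 * KVM_HPAGE_SHIFT(2) == 21, KVM_HPAGE_SIZE(2) == 2MB and
 * KVM_PAGES_PER_HPAGE(2) == 512.  For the 1GB level (x == 3) the gfn
 * shift is 18, giving 262144 base pages per huge page.
 */
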
#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
#define KVM_NR_VAR_MTRR 8

#define ASYNC_PF_PER_VCPU 64

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;
struct kvm_async_pf;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum kvm_reg_ex {
	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
	VCPU_EXREG_CR3,
	VCPU_EXREG_RFLAGS,
	VCPU_EXREG_CPL,
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_emulate.h>

#define KVM_NR_MEM_OBJS 40

#define KVM_NR_DB_REGS	4

#define DR6_BD		(1 << 13)
#define DR6_BS		(1 << 14)
#define DR6_FIXED_1	0xffff0ff0
#define DR6_VOLATILE	0x0000e00f

#define DR7_BP_EN_MASK	0x000000ff
#define DR7_GE		(1 << 9)
#define DR7_GD		(1 << 13)
#define DR7_FIXED_1	0x00000400
#define DR7_VOLATILE	0xffff23ff

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
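
/*
 * Illustrative sketch (editor's addition, not part of this header): a
 * consumer pops a preallocated object instead of calling the allocator in
 * a context where failure cannot be tolerated; the helper name below is
 * hypothetical.
 *
 *	static void *mmu_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */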

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - direct mapping of virtual to physical at gfn,
 *              used for real mode and two-dimensional paging
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned direct:1;
		unsigned access:3;
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
	};
};
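
/*
 * Usage sketch (editor's addition): the union lets MMU code hash and
 * compare a role as a single integer while still naming the bits, e.g.
 * for some struct kvm_mmu_page *sp:
 *
 *	union kvm_mmu_page_role role = { .word = 0 };
 *
 *	role.level = 4;
 *	role.direct = 1;
 *	found = (sp->role.word == role.word);
 */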

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/*
	 * One bit set per slot which has memory
	 * in this shadow page.
	 */
	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	bool multimapped;	/* More than one parent_pte? */
	bool unsync;
	int root_count;		/* Currently serving as active root */
	unsigned int unsync_children;
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);
};

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_pio_request {
	unsigned long count;
	int in;
	int port;
	int size;
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 32-bit PAE, and
 * 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
 * current mmu mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
			  bool prefault);
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
			    struct x86_exception *exception);
	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int (*sync_page)(struct kvm_vcpu *vcpu,
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte, unsigned long mmu_seq);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
	union kvm_mmu_page_role base_role;
	bool direct_map;

	u64 *pae_root;
	u64 *lm_root;
	u64 rsvd_bits_mask[2][4];

	bool nx;

	u64 pdptrs[4]; /* pae */
};

struct kvm_vcpu_arch {
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;
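
	/*
	 * Example (editor's note): kvm_register_read(vcpu, VCPU_REGS_RAX)
	 * refills regs[] from hardware via kvm_x86_ops->cache_reg() when the
	 * corresponding regs_avail bit is clear; kvm_register_write() also
	 * sets the regs_dirty bit so the new value reaches hardware before
	 * the next guest entry.
	 */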

	unsigned long cr0;
	unsigned long cr0_guest_owned_bits;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
	u32 hflags;
	u64 efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	int32_t apic_arb_prio;
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	/*
	 * Paging state of the vcpu
	 *
	 * If the vcpu runs in guest mode with two-level paging, this still
	 * saves the paging mode of the L1 guest.  This context is always
	 * used to handle faults.
	 */
	struct kvm_mmu mmu;

	/*
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page
	 * tables of an L2 guest.  This context is only initialized for
	 * page table walking and not for faulting since we never handle
	 * L2 page faults on the host.
	 */
	struct kvm_mmu nested_mmu;

	/*
	 * Pointer to the mmu context currently used for
	 * gva_to_gpa translations.
	 */
	struct kvm_mmu *walk_mmu;
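
	/*
	 * Call pattern sketch (editor's addition): translation helpers
	 * dispatch through the currently active context instead of
	 * hard-coding a paging mode, roughly
	 *
	 *	gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access,
	 *					      &exception);
	 */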

	/* only needed in kvm_pv_mmu_op() path, but it's hot so
	 * put it here to avoid allocation */
	struct kvm_pv_mmu_op_buffer mmu_op_buffer;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;
	gfn_t last_pte_gfn;

	struct fpu guest_fpu;
	u64 xcr0;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	u8 event_exit_inst_len;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		bool reinject;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		bool soft;
		u8 nr;
	} interrupt;

	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hw_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;
	u64 last_host_tsc;
	u64 last_guest_tsc;
	u64 last_kernel_ns;
	u64 last_tsc_nsec;
	u64 last_tsc_write;
	bool tsc_catchup;

	bool nmi_pending;
	bool nmi_injected;

	struct mtrr_state_type mtrr_state;
	u32 pat;

	int switch_db_regs;
	unsigned long db[KVM_NR_DB_REGS];
	unsigned long dr6;
	unsigned long dr7;
	unsigned long eff_db[KVM_NR_DB_REGS];

	u64 mcg_cap;
	u64 mcg_status;
	u64 mcg_ctl;
	u64 *mce_banks;

	/* used for guest single stepping over the given code position */
	unsigned long singlestep_rip;

	/* fields used by HYPER-V emulation */
	u64 hv_vapic;

	cpumask_var_t wbinvd_dirty_mask;

	struct {
		bool halted;
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
		struct gfn_to_hva_cache data;
		u64 msr_val;
		u32 id;
		bool send_user_only;
	} apf;
};

struct kvm_arch {
	unsigned int n_used_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_max_mmu_pages;
	atomic_t invlpg_counter;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	struct list_head assigned_dev_head;
	struct iommu_domain *iommu_domain;
	int iommu_flags;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;
	int vapics_in_nmi_mode;

	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	unsigned long irq_sources_bitmap;
	s64 kvmclock_offset;
	raw_spinlock_t tsc_write_lock;
	u64 last_tsc_nsec;
	u64 last_tsc_offset;
	u64 last_tsc_write;
	u32 virtual_tsc_khz;
	u32 virtual_tsc_mult;
	s8 virtual_tsc_shift;

	struct kvm_xen_hvm_config xen_hvm_config;

	/* fields used by HYPER-V emulation */
	u64 hv_guest_os_id;
	u64 hv_hypercall;

	#ifdef CONFIG_KVM_MMU_AUDIT
	int audit_point;
	#endif
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 mmu_unsync;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
	u32 irq_injections;
	u32 nmi_injections;
};

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);	/* __init */
	int (*disabled_by_bios)(void);		/* __init */
	int (*hardware_enable)(void *dummy);
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);		/* __init */
	void (*hardware_unsetup)(void);		/* __exit */
	bool (*cpu_has_accelerated_tpr)(void);
	void (*cpuid_update)(struct kvm_vcpu *vcpu);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	void (*set_guest_debug)(struct kvm_vcpu *vcpu,
				struct kvm_guest_debug *dbg);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
	void (*decache_cr3)(struct kvm_vcpu *vcpu);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
	void (*fpu_activate)(struct kvm_vcpu *vcpu);
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	void (*set_irq)(struct kvm_vcpu *vcpu);
	void (*set_nmi)(struct kvm_vcpu *vcpu);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code,
				bool reinject);
	void (*cancel_injection)(struct kvm_vcpu *vcpu);
	int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
	int (*nmi_allowed)(struct kvm_vcpu *vcpu);
	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
	int (*get_lpage_level)(void);
	bool (*rdtscp_supported)(void);
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);

	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);

	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);

	bool (*has_wbinvd_exit)(void);

	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);

	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
	const struct trace_print_flags *exit_reasons_str;
};

struct kvm_arch_async_pf {
	u32 token;
	gfn_t gfn;
	unsigned long cr3;
	bool direct_map;
};

extern struct kvm_x86_ops *kvm_x86_ops;

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE	(1 << 0)
#define EMULTYPE_TRAP_UD	(1 << 1)
#define EMULTYPE_SKIP		(1 << 2)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
			    int emulation_type, void *insn, int insn_len);

static inline int emulate_instruction(struct kvm_vcpu *vcpu,
				      int emulation_type)
{
	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
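
/*
 * Typical caller pattern (editor's sketch): an intercept handler asks the
 * emulator to finish the instruction and falls back to injecting #UD when
 * emulation is impossible, roughly
 *
 *	int er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 *
 *	if (er != EMULATE_DONE)
 *		kvm_queue_exception(vcpu, UD_VECTOR);
 */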

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);

void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
		    bool has_error_code, u32 error_code);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gfn_t gfn, void *data, int offset, int len,
			    u32 access);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);

int kvm_pic_set_irq(void *opaque, int irq, int level);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
				struct x86_exception *exception);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		       void *insn, int insn_len);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
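
/*
 * Usage sketch (editor's addition): MMU code recovers the kvm_mmu_page
 * that owns a shadow pte by converting the pte's kernel address to a
 * physical address, e.g.
 *
 *	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 */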

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE							\
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
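
/*
 * Worked size (editor's note): 0x68 + 32 + 8192 + 1 = 8329 bytes, i.e. the
 * real-mode TSS plus its I/O bitmap spans three 4K pages; the extra byte is
 * the all-ones terminator required after the I/O permission bitmap.
 */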

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#define HF_GIF_MASK		(1 << 0)
#define HF_HIF_MASK		(1 << 1)
#define HF_VINTR_MASK		(1 << 2)
#define HF_NMI_MASK		(1 << 3)
#define HF_IRET_MASK		(1 << 4)
#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */

/*
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Trap the fault and ignore
 * the instruction if that happens.
 */
asmlinkage void kvm_spurious_fault(void);
extern bool kvm_rebooting;

#define __kvm_handle_fault_on_reboot(insn)		\
	"666: " insn "\n\t"				\
	"668: \n\t"					\
	".pushsection .fixup, \"ax\" \n"		\
	"667: \n\t"					\
	"cmpb $0, kvm_rebooting \n\t"			\
	"jne 668b \n\t"					\
	__ASM_SIZE(push) " $666b \n\t"			\
	"call kvm_spurious_fault \n\t"			\
	".popsection \n\t"				\
	".pushsection __ex_table, \"a\" \n\t"		\
	_ASM_PTR " 666b, 667b \n\t"			\
	".popsection"
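
/*
 * Usage sketch (editor's addition): a vendor module wraps its raw
 * virtualization instructions with this macro so that a fault taken after
 * virtualization has been turned off for reboot lands in
 * kvm_spurious_fault() instead of an oops, roughly
 *
 *	asm volatile(__kvm_handle_fault_on_reboot("vmxon %0")
 *		     : : "m"(phys_addr) : "memory", "cc");
 */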

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);

void kvm_define_shared_msr(unsigned index, u32 msr);
void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work);
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work);
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

#endif /* _ASM_X86_KVM_HOST_H */