/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/desc.h>

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
				  0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_CS,
	VCPU_SREG_DS,
	VCPU_SREG_ES,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_SS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_x86_emulate.h>

#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
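
/*
 * A sketch of how such a cache is meant to be used (the helper names are
 * illustrative assumptions, not declarations from this header): callers
 * top the cache up while sleeping is still allowed, so that allocation
 * inside the fault path reduces to popping a preallocated object and
 * cannot fail:
 *
 *	static int mmu_topup_cache(struct kvm_mmu_memory_cache *mc,
 *				   struct kmem_cache *base, int min)
 *	{
 *		while (mc->nobjs < min) {
 *			void *obj = kmem_cache_zalloc(base, GFP_KERNEL);
 *			if (!obj)
 *				return -ENOMEM;
 *			mc->objects[mc->nobjs++] = obj;
 *		}
 *		return 0;
 *	}
 *
 *	static void *mmu_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */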

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 * bits 17:19 - common access permissions for all ptes in this shadow page
 * bit   20 - invalid - the shadow page was zapped but is still in use as a
 *	      root and must not be found by lookups
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned metaphysical:1;
		unsigned access:3;
		unsigned invalid:1;
	};
};
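
/*
 * The gfn and the role word together key the shadow page hash table.  A
 * hedged sketch of a lookup (the helper shape and hash choice are
 * assumptions, not taken from this file):
 *
 *	role.word = 0;
 *	role.glevels = vcpu->arch.mmu.root_level;
 *	role.level = level;
 *	role.quadrant = quadrant;	(meaningful for 2-level guests only)
 *	bucket = &kvm->arch.mmu_page_hash[gfn & (KVM_NUM_MMU_PAGES - 1)];
 *	hlist_for_each_entry(sp, node, bucket, hash_link)
 *		if (sp->gfn == gfn && sp->role.word == role.word)
 *			return sp;
 */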

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};
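
/*
 * Switching paging modes (real mode, 2-level, PAE, long mode) only has
 * to repoint these hooks.  A minimal sketch of the consumer side,
 * assuming the usual page-fault exit path rather than quoting it:
 *
 *	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 *	if (r < 0)
 *		return r;	(unrecoverable)
 *	if (r == 0)
 *		return 1;	(fixed by the shadow mmu; resume the guest)
 *	(r > 0: the faulting instruction must be emulated)
 */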

struct kvm_vcpu_arch {
	u64 host_tsc;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;	/* needs vcpu_load_rsp_rip() */

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE 0
#define VCPU_MP_STATE_UNINITIALIZED 1
#define VCPU_MP_STATE_INIT_RECEIVED 2
#define VCPU_MP_STATE_SIPI_RECEIVED 3
#define VCPU_MP_STATE_HALTED 4
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;

	struct {
		gfn_t gfn;		/* presumed gfn during guest pte update */
		struct page *page;	/* page corresponding to that gfn */
	} update_pte;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;

	gpa_t time;
	struct kvm_vcpu_time_info hv_clock;
	unsigned int time_offset;
	struct page *time_page;
};
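
/*
 * The last four fields back kvmclock: the guest writes a guest physical
 * address to the paravirtual system-time MSR, the host pins that page as
 * time_page and copies hv_clock into it around guest entries.  A sketch
 * of the update, assuming the era's two-argument kmap_atomic API
 * (illustrative, not a quote of the implementation):
 *
 *	shared = kmap_atomic(vcpu->arch.time_page, KM_USER0);
 *	memcpy(shared + vcpu->arch.time_offset, &vcpu->arch.hv_clock,
 *	       sizeof(vcpu->arch.hv_clock));
 *	kunmap_atomic(shared, KM_USER0);
 *	mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
 */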

struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

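/*
 * An alias remaps npages of guest frames starting at base_gfn onto
 * target_gfn.  Resolving a gfn is a linear scan over the at most
 * KVM_ALIAS_SLOTS entries; a sketch with an assumed function name:
 *
 *	gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 *	{
 *		int i;
 *
 *		for (i = 0; i < kvm->arch.naliases; ++i) {
 *			struct kvm_mem_alias *a = &kvm->arch.aliases[i];
 *
 *			if (gfn >= a->base_gfn &&
 *			    gfn < a->base_gfn + a->npages)
 *				return a->target_gfn + gfn - a->base_gfn;
 *		}
 *		return gfn;
 *	}
 */
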
struct kvm_arch {
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];

	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;

	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);	/* __init */
	int (*disabled_by_bios)(void);		/* __init */
	void (*hardware_enable)(void *dummy);	/* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);		/* __init */
	void (*hardware_unsetup)(void);		/* __exit */
	bool (*cpu_has_accelerated_tpr)(void);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	bool (*exception_injected)(struct kvm_vcpu *vcpu);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
};

extern struct kvm_x86_ops *kvm_x86_ops;
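
/*
 * Each vendor module (vmx or svm) supplies one of these tables at load
 * time and kvm_x86_ops then points at the active implementation.  A
 * hedged sketch of the registration, with the vendor symbol names
 * assumed rather than taken from this header:
 *
 *	static struct kvm_x86_ops vmx_x86_ops = {
 *		.cpu_has_kvm_support = cpu_has_kvm_support,
 *		.hardware_setup = hardware_setup,
 *		.vcpu_create = vmx_create_vcpu,
 *		(...remaining hooks...)
 *	};
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 */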

int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int emulation_type);
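
/*
 * A typical exit handler reacts to each result in turn (a sketch; the
 * surrounding handler shape is assumed, not quoted):
 *
 *	er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
 *	switch (er) {
 *	case EMULATE_DONE:
 *		return 1;	(resume the guest)
 *	case EMULATE_DO_MMIO:
 *		return 0;	(kvm_run holds the mmio request; exit to user)
 *	case EMULATE_FAIL:
 *		kvm_report_emulation_failure(vcpu, "exit handler");
 *		return 1;
 *	}
 */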
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

void kvm_enable_tdp(void);

int complete_pio(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
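
/*
 * This works because every shadow page table occupies exactly one struct
 * page whose private field was set at allocation time; a sketch of the
 * round trip (the allocation side is assumed, not quoted):
 *
 *	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 *	...
 *	sp = page_header(__pa(sptep));	(back from any spte pointer)
 */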

static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define ASM_VMX_VMCLEAR_RAX	".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH	".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME	".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX	".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX	".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX	".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX	".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF		".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX	".byte 0xf3, 0x0f, 0xc7, 0x30"
#define ASM_VMX_INVVPID		".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
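
/*
 * The VMX instructions are spelled as raw opcode bytes so that the file
 * still assembles with binutils that predate the VMX mnemonics.  Example
 * use in inline asm (a sketch; the operand setup is assumed):
 *
 *	asm volatile(ASM_VMX_VMXON_RAX
 *		     : : "a"(&phys_addr), "m"(phys_addr)
 *		     : "memory", "cc");
 */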

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
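
/*
 * Worked out: 0x68 (104) bytes of base TSS, a 32-byte interrupt
 * redirection bitmap (256 software interrupts / 8), an 8192-byte I/O
 * permission bitmap (65536 ports / 8), plus the trailing 0xff terminator
 * byte the IOPB format requires: 8329 bytes in total.
 */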

#endif