/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef ASM_X86__KVM_HOST_H
#define ASM_X86__KVM_HOST_H

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>

#define KVM_MAX_VCPUS 16
#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
				  0xFFFFFF0000000000ULL)

#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

/* shadow tables are PAE even on non-PAE hosts */
#define KVM_HPAGE_SHIFT 21
#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))

#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)

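/*
 * Illustrative arithmetic (an editorial note, not part of the original
 * header): because the shadow tables are always PAE-format, a huge shadow
 * mapping always covers a 2 MiB large page, whatever the host paging mode:
 *
 *	KVM_HPAGE_SIZE      == 1UL << 21 == 0x200000 (2 MiB)
 *	KVM_PAGES_PER_HPAGE == 0x200000 / 0x1000 == 512
 */
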
#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MC_VECTOR 18

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
#define KVM_NR_VAR_MTRR 8

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum kvm_reg {
	VCPU_REGS_RAX = 0,
	VCPU_REGS_RCX = 1,
	VCPU_REGS_RDX = 2,
	VCPU_REGS_RBX = 3,
	VCPU_REGS_RSP = 4,
	VCPU_REGS_RBP = 5,
	VCPU_REGS_RSI = 6,
	VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
	VCPU_REGS_R8 = 8,
	VCPU_REGS_R9 = 9,
	VCPU_REGS_R10 = 10,
	VCPU_REGS_R11 = 11,
	VCPU_REGS_R12 = 12,
	VCPU_REGS_R13 = 13,
	VCPU_REGS_R14 = 14,
	VCPU_REGS_R15 = 15,
#endif
	VCPU_REGS_RIP,
	NR_VCPU_REGS
};

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,
	VCPU_SREG_LDTR,
};

#include <asm/kvm_x86_emulate.h>

#define KVM_NR_MEM_OBJS 40

struct kvm_guest_debug {
	int enabled;
	unsigned long bp[4];
	int singlestep;
};

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

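/*
 * Illustrative sketch (editorial, with an assumed helper name; the real
 * consumers live in arch/x86/kvm/mmu.c): the caches are topped up with
 * ordinary GFP_KERNEL allocations before the shadow page tables are
 * touched, so the fault path itself can pop objects without any failure
 * handling:
 *
 *	static void *mmu_cache_alloc(struct kvm_mmu_memory_cache *mc)
 *	{
 *		BUG_ON(!mc->nobjs);
 *		return mc->objects[--mc->nobjs];
 *	}
 */
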
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 * bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned metaphysical:1;
		unsigned access:3;
		unsigned invalid:1;
	};
};

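/*
 * Worked example (editorial, illustrative only): with the bit layout
 * documented above, a shadow page for a 4-level 64-bit guest (glevels == 4)
 * acting as a level-1 page table in quadrant 0, with every other field
 * zero, encodes as
 *
 *	role.word == (4 << 0) | (1 << 4) == 0x14
 */
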
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;

	u64 *pae_root;
};

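/*
 * Illustrative call sequence (editorial sketch): the architecture code does
 * not walk guest page tables directly, it dispatches through the per-vcpu
 * ops, roughly
 *
 *	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
 *
 * where the function pointers were chosen for the current paging mode when
 * the context was built (see kvm_mmu_reset_context() below).
 */
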
struct kvm_vcpu_arch {
	u64 host_tsc;
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	/*
	 * rip and regs accesses must go through
	 * kvm_{register,rip}_{read,write} functions.
	 */
	unsigned long regs[NR_VCPU_REGS];
	u32 regs_avail;
	u32 regs_dirty;

	unsigned long cr0;
	unsigned long cr2;
	unsigned long cr3;
	unsigned long cr4;
	unsigned long cr8;
	u64 pdptrs[4]; /* pae */
	u64 shadow_efer;
	u64 apic_base;
	struct kvm_lapic *apic;	/* kernel irqchip context */
	int mp_state;
	int sipi_vector;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;

	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;
	gfn_t last_pte_gfn;

	struct {
		gfn_t gfn;	/* presumed gfn during guest pte update */
		pfn_t pfn;	/* pfn corresponding to that gfn */
		int largepage;
		unsigned long mmu_seq;
	} update_pte;

	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	void *pio_data;

	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;

	struct kvm_queued_interrupt {
		bool pending;
		u8 nr;
	} interrupt;

	struct {
		int active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
	} rmode;
	int halt_request; /* real mode on Intel only */

	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */

	struct x86_emulate_ctxt emulate_ctxt;

	gpa_t time;
	struct pvclock_vcpu_time_info hv_clock;
	unsigned int hv_clock_tsc_khz;
	unsigned int time_offset;
	struct page *time_page;

	bool nmi_pending;
	bool nmi_injected;

	u64 mtrr[0x100];
};

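/*
 * The accessors named in the regs comment above are defined outside this
 * header (in kvm_cache_regs.h at this point in the tree); conceptually they
 * look like the following editorial sketch, pulling a register out of the
 * vendor code only when it has not been cached yet:
 *
 *	static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 *						      enum kvm_reg reg)
 *	{
 *		if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
 *			kvm_x86_ops->cache_reg(vcpu, reg);
 *		return vcpu->arch.regs[reg];
 *	}
 */
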
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_arch {
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];

	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;

	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;

	gpa_t wall_clock;

	struct page *ept_identity_pagetable;
	bool ept_identity_pagetable_done;
};

struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 mmu_pte_write;
	u32 mmu_pte_updated;
	u32 mmu_pde_zapped;
	u32 mmu_flooded;
	u32 mmu_recycled;
	u32 mmu_cache_miss;
	u32 remote_tlb_flush;
	u32 lpages;
};

struct kvm_vcpu_stat {
	u32 pf_fixed;
	u32 pf_guest;
	u32 tlb_flush;
	u32 invlpg;

	u32 exits;
	u32 io_exits;
	u32 mmio_exits;
	u32 signal_exits;
	u32 irq_window_exits;
	u32 nmi_window_exits;
	u32 halt_exits;
	u32 halt_wakeup;
	u32 request_irq_exits;
	u32 irq_exits;
	u32 host_state_reload;
	u32 efer_reload;
	u32 fpu_reload;
	u32 insn_emulation;
	u32 insn_emulation_fail;
	u32 hypercalls;
};

struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));

struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);          /* __init */
	int (*disabled_by_bios)(void);             /* __init */
	void (*hardware_enable)(void *dummy);      /* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);               /* __init */
	void (*hardware_unsetup)(void);            /* __exit */
	bool (*cpu_has_accelerated_tpr)(void);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	int (*get_cpl)(struct kvm_vcpu *vcpu);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	bool (*exception_injected)(struct kvm_vcpu *vcpu);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
	int (*get_tdp_level)(void);
};

extern struct kvm_x86_ops *kvm_x86_ops;

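/*
 * Editorial sketch (the function and structure names are placeholders):
 * each vendor module (kvm-intel, kvm-amd) fills in one of these tables and
 * hands it to the generic code at module init time, which stores it in
 * kvm_x86_ops:
 *
 *	static struct kvm_x86_ops my_x86_ops = {
 *		.cpu_has_kvm_support	= my_cpu_has_kvm_support,
 *		.hardware_enable	= my_hardware_enable,
 *		.vcpu_create		= my_vcpu_create,
 *		.run			= my_vcpu_run,
 *		.handle_exit		= my_handle_exit,
 *	};
 *
 *	kvm_init(&my_x86_ops, sizeof(struct my_vcpu), THIS_MODULE);
 */
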
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
void kvm_mmu_set_base_ptes(u64 base_pte);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);

extern bool tdp_enabled;

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};

#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int emulation_type);
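
/*
 * Typical caller pattern (editorial sketch, not taken from this header):
 * an exit handler emulates the faulting instruction and, depending on the
 * result, resumes the guest, drops back to userspace to complete the mmio
 * access, or reports the failure:
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		break;
 *	case EMULATE_DO_MMIO:
 *		return 0;
 *	case EMULATE_FAIL:
 *		kvm_report_emulation_failure(vcpu, "example caller");
 *		return 0;
 *	}
 *	return 1;
 */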
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);

void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg);

int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);

void kvm_inject_nmi(struct kvm_vcpu *vcpu);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

void kvm_enable_tdp(void);
void kvm_disable_tdp(void);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 kvm_read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 kvm_read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}

static inline void kvm_load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void kvm_load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void kvm_load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}

static inline void kvm_get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void kvm_get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long kvm_read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
#endif

static inline void kvm_fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)":: "r" (image));
}

static inline void kvm_fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)":: "r" (image));
}

static inline void kvm_fx_finit(void)
{
	asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

enum {
	TASK_SWITCH_CALL = 0,
	TASK_SWITCH_IRET = 1,
	TASK_SWITCH_JMP = 2,
	TASK_SWITCH_GATE = 3,
};

#ifdef CONFIG_64BIT
# define KVM_EX_ENTRY ".quad"
# define KVM_EX_PUSH "pushq"
#else
# define KVM_EX_ENTRY ".long"
# define KVM_EX_PUSH "pushl"
#endif

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 */
asmlinkage void kvm_handle_fault_on_reboot(void);

#define __kvm_handle_fault_on_reboot(insn) \
	"666: " insn "\n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	KVM_EX_PUSH " $666b \n\t" \
	"jmp kvm_handle_fault_on_reboot \n\t" \
	".popsection \n\t" \
	".pushsection __ex_table, \"a\" \n\t" \
	KVM_EX_ENTRY " 666b, 667b \n\t" \
	".popsection"

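/*
 * Usage sketch (editorial; the shorthand name and operands are
 * illustrative): the vendor modules wrap each hardware virtualization
 * instruction with the macro above, so that if it faults because a reboot
 * has already disabled VMX/SVM, the fixup entry jumps to
 * kvm_handle_fault_on_reboot() instead of oopsing:
 *
 *	#define __ex(insn) __kvm_handle_fault_on_reboot(insn)
 *
 *	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX)
 *		      : : "a"(&phys_addr), "m"(phys_addr)
 *		      : "cc", "memory");
 */
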
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_age_hva(struct kvm *kvm, unsigned long hva);

#endif /* ASM_X86__KVM_HOST_H */