#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
         | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
        (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)
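/*
 * fx_buf (in struct kvm_vcpu below) holds both the host and the guest
 * fxsave/fxrstor images; the extra FX_IMAGE_ALIGN bytes leave room to place
 * the two 512-byte images on the 16-byte boundary that fxsave requires.
 */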

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef unsigned long  gfn_t;

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef unsigned long  hfn_t;
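
/*
 * Illustrative sketch only (not an interface defined here): a frame number
 * is the corresponding address with the page offset stripped, so with the
 * usual PAGE_SHIFT,
 *
 *      gfn = gpa >> PAGE_SHIFT;
 *      gpa = ((gpa_t)gfn << PAGE_SHIFT) + offset_in_page;
 *
 * gva_to_hpa()/gpa_to_hpa() below perform the guest-to-host half of the
 * gva -> gpa -> hpa translation chain.
 */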

struct kvm_mmu_page {
        struct list_head link;
        hpa_t page_hpa;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int global;                /* Set if all ptes in this page are global */
        u64 *parent_pte;
};

struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*inval_page)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;
};
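
/*
 * Illustrative sketch only: exit handlers dispatch through the callbacks of
 * the currently installed mmu mode, along the lines of
 *
 *      r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
 *
 * so callers never need to know which of the three paging modes the guest
 * is running in.
 */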

struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
        int singlestep;
};

enum {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        NR_VCPU_REGS
};

enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_SS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

struct kvm_vcpu {
        struct kvm *kvm;
        union {
                struct vmcs *vmcs;
                struct vcpu_svm *svm;
        };
        struct mutex mutex;
        int cpu;
        int launched;
        int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
        unsigned long irq_pending[NR_IRQ_WORDS];
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;      /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr8;
        u64 shadow_efer;
        u64 apic_base;
        int nmsrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;

        struct list_head free_pages;
        struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
        struct kvm_mmu mmu;

        struct kvm_guest_debug guest_debug;

        char fx_buf[FX_BUF_SIZE];
        char *host_fx_image;
        char *guest_fx_image;

        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;

        struct {
                int active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } tr, es, ds, fs, gs;
        } rmode;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        struct page **phys_mem;
        unsigned long *dirty_bitmap;
};

struct kvm {
        spinlock_t lock; /* protects everything except vcpus */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
        struct list_head active_mmu_pages;
        struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        int busy;
};

struct kvm_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 halt_exits;
        u32 request_irq_exits;
        u32 irq_exits;
};

struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        void (*hardware_enable)(void *dummy);      /* __init */
        void (*hardware_disable)(void *dummy);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */

        int (*vcpu_create)(struct kvm_vcpu *vcpu);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);

        struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
                                      unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  unsigned long addr, u32 err_code);

        void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
};

extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);
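
/*
 * Rough, illustrative sketch of how the ops table is used (the exact symbol
 * names live in the hardware-specific vmx/svm modules, not here): that
 * module fills in a kvm_arch_ops table and hands it to kvm_init_arch() from
 * its module_init(), roughly
 *
 *      static struct kvm_arch_ops vmx_arch_ops = {
 *              .cpu_has_kvm_support = ...,
 *              .hardware_enable = ...,
 *              .run = ...,
 *      };
 *
 *      return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
 */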

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
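
/*
 * Failed translations are flagged in the most significant bit of the
 * returned hpa, so a lookup can be checked in place.  Illustrative use:
 *
 *      hpa_t hpa = gva_to_hpa(vcpu, gva);
 *      if (is_error_hpa(hpa))
 *              return 0;
 */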

void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
        EMULATE_DONE,           /* no further processing */
        EMULATE_DO_MMIO,        /* kvm_run filled with mmio request */
        EMULATE_FAIL,           /* can't emulate this instruction */
};
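
/*
 * Illustrative only: an exit handler that cannot satisfy an access directly
 * hands the instruction to the emulator and acts on the result, roughly
 *
 *      switch (emulate_instruction(vcpu, kvm_run, cr2, error_code)) {
 *      case EMULATE_DONE:
 *              return 1;               resume the guest
 *      case EMULATE_DO_MMIO:
 *              return 0;               kvm_run now describes the mmio exit
 *      default:
 *              report the emulation failure
 *      }
 */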

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);

struct x86_emulate_ctxt;

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
                   gva_t addr,
                   unsigned long size,
                   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
                    gva_t addr,
                    unsigned long size,
                    void *data);
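
/*
 * kvm_read_guest()/kvm_write_guest() take guest virtual addresses, so the
 * copy goes through the gva -> gpa -> hpa translation.  Illustrative use
 * only (hypothetical local variable):
 *
 *      u32 insn;
 *      kvm_read_guest(vcpu, vcpu->rip, sizeof(insn), &insn);
 */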

unsigned long segment_base(u16 selector);

static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
        return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}
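
/*
 * Unlike gfn_to_page() above, _gfn_to_page() looks the memory slot up itself
 * and returns NULL for a gfn no slot covers.  Illustrative use only (callers
 * are expected to hold kvm->lock so the slot array stays stable):
 *
 *      struct page *page = _gfn_to_page(kvm, gpa >> PAGE_SHIFT);
 *      if (!page)
 *              return;
 */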

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page->private;
}

static inline u16 read_fs(void)
{
        u16 seg;
        asm ("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm ("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm ("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
        asm ("lldt %0" : : "g"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
        asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm ("str %0" : "=g"(tr));
        return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline void fx_save(void *image)
{
        asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(void *image)
{
        asm ("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
        asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

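/*
 * Hand-assembled opcode strings for the VMX instructions below, usable with
 * toolchains whose assembler does not know the vmx mnemonics.  Roughly
 * illustrative use (the real call sites add their own error checking):
 *
 *      asm volatile (ASM_VMX_VMPTRLD_RAX
 *                    : : "a"(&vmcs_phys_addr), "m"(vmcs_phys_addr)
 *                    : "cc", "memory");
 */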
#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
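/*
 * RMODE_TSS_SIZE covers the 0x68-byte hardware TSS, the 256-bit interrupt
 * redirection bitmap, the 64K-bit I/O permission bitmap and the trailing
 * terminator byte the architecture expects after the I/O bitmap.
 */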

#ifdef CONFIG_X86_64

/*
 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.  Therefore
 * we need to allocate shadow page tables in the first 4GB of memory, which
 * happens to fit the DMA32 zone.
 */
#define GFP_KVM_MMU (GFP_KERNEL | __GFP_DMA32)

#else

#define GFP_KVM_MMU GFP_KERNEL

#endif

#endif