/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture specific interfaces, x86 version
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef KVM_X86_H
#define KVM_X86_H

#include <linux/types.h>
#include <linux/mm.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <asm/desc.h>

#include "types.h"

#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS|0xFFFFFF0000000000ULL)

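/*
 * Editor's note (illustrative, not part of the original header): set_cr3()
 * in x86.c checks a new guest CR3 against the reserved-bits mask for the
 * current paging mode and injects #GP on a violation, roughly:
 *
 *      if (is_long_mode(vcpu) && (cr3 & CR3_L_MODE_RESERVED_BITS))
 *              kvm_inject_gp(vcpu, 0);
 */
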
#define KVM_GUEST_CR0_MASK \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
         | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
        (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
         | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
        (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_vcpu;
struct kvm;

enum {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        NR_VCPU_REGS
};

enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_SS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

#include "x86_emulate.h"

#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

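/*
 * Editor's sketch (hypothetical helper, not part of the upstream header):
 * one way such a preallocated cache is typically consumed once the fault
 * path has topped it up.  The real allocator lives in mmu.c.
 */
static inline void *kvm_mmu_memory_cache_pop(struct kvm_mmu_memory_cache *mc)
{
        if (!mc->nobjs)         /* callers refill before handling the fault,
                                 * so this is not expected to happen */
                return NULL;
        return mc->objects[--mc->nobjs];
}
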
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 * bits 4:7 - page table level for this shadow (1-4)
 * bits 8:9 - page table quadrant for 2-level guests
 * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 * bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
                unsigned access : 3;
        };
};

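/*
 * Editor's illustration (not upstream code): because the bitfield and
 * 'word' overlay the same storage, a shadow page lookup can compare every
 * role field at once, e.g.
 *
 *      if (sp->gfn == gfn && sp->role.word == role.word)
 *              // same guest frame with the same role: reuse this page
 */
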
struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        u64 *spt;
        /* hold the gfn of each spte inside spt */
        gfn_t *gfns;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int multimapped;           /* More than one parent_pte? */
        int root_count;            /* Currently serving as active root */
        union {
                u64 *parent_pte;               /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
};

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        void (*prefetch_page)(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *page);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;

        u64 *pae_root;
};

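/*
 * Editor's sketch of how the callbacks above get selected (the *_init_context
 * helper names are from mmu.c and are shown purely for illustration):
 *
 *      if (!is_paging(vcpu))
 *              nonpaging_init_context(vcpu);
 *      else if (is_long_mode(vcpu))
 *              paging64_init_context(vcpu);
 *      else if (is_pae(vcpu))
 *              paging32E_init_context(vcpu);
 *      else
 *              paging32_init_context(vcpu);
 *
 * kvm_mmu_reset_context(), declared below, redoes this whenever the guest
 * changes paging mode.
 */
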
struct kvm_vcpu_arch {
        u64 host_tsc;
        int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
        DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;      /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE          0
#define VCPU_MP_STATE_UNINITIALIZED    1
#define VCPU_MP_STATE_INIT_RECEIVED    2
#define VCPU_MP_STATE_SIPI_RECEIVED    3
#define VCPU_MP_STATE_HALTED           4
        int mp_state;
        int sipi_vector;
        u64 ia32_misc_enable_msr;

        struct kvm_mmu mmu;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
        struct kvm_mmu_memory_cache mmu_page_cache;
        struct kvm_mmu_memory_cache mmu_page_header_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;
        u64 *last_pte_updated;

        struct i387_fxsave_struct host_fx_image;
        struct i387_fxsave_struct guest_fx_image;

        gva_t mmio_fault_cr2;
        struct kvm_pio_request pio;
        void *pio_data;

        struct kvm_queued_exception {
                bool pending;
                bool has_error_code;
                u8 nr;
                u32 error_code;
        } exception;

        struct {
                int active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } tr, es, ds, fs, gs;
        } rmode;
        int halt_request; /* real mode on Intel only */

        int cpuid_nent;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
        /* emulate context */

        struct x86_emulate_ctxt emulate_ctxt;
};

struct kvm_mem_alias {
        gfn_t base_gfn;
        unsigned long npages;
        gfn_t target_gfn;
};

struct kvm_arch {
        int naliases;
        struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
};

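/*
 * Editor's note (the lookup helper itself lives in x86.c): a gfn falling
 * inside [base_gfn, base_gfn + npages) of an alias slot is redirected to
 * target_gfn + (gfn - base_gfn); a gfn outside every slot is used unchanged.
 */
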
struct kvm_vcpu_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 halt_exits;
        u32 halt_wakeup;
        u32 request_irq_exits;
        u32 irq_exits;
        u32 host_state_reload;
        u32 efer_reload;
        u32 fpu_reload;
        u32 insn_emulation;
        u32 insn_emulation_fail;
};

struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));

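/*
 * Editor's note: this layout mirrors the memory operand of sgdt/sidt (a
 * 16-bit limit immediately followed by the base address), which is why the
 * structure is packed; without the attribute the compiler would insert
 * padding between 'limit' and 'base'.  See get_idt()/get_gdt() below.
 */
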
struct kvm_x86_ops {
        int (*cpu_has_kvm_support)(void);          /* __init */
        int (*disabled_by_bios)(void);             /* __init */
        void (*hardware_enable)(void *dummy);      /* __init */
        void (*hardware_disable)(void *dummy);
        void (*check_processor_compatibility)(void *rtn);
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */

        /* Create, but do not attach this VCPU */
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*vcpu_reset)(struct kvm_vcpu *vcpu);

        void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*vcpu_decache)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*tlb_flush)(struct kvm_vcpu *vcpu);

        void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        int (*get_irq)(struct kvm_vcpu *vcpu);
        void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
        void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code);
        bool (*exception_injected)(struct kvm_vcpu *vcpu);
        void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
        void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
                                       struct kvm_run *run);

        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
};

extern struct kvm_x86_ops *kvm_x86_ops;

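/*
 * Editor's note: kvm_x86_ops is filled in by whichever vendor module is
 * loaded (kvm-intel or kvm-amd); the arch-independent code then drives the
 * hardware only through these callbacks, e.g. (illustrative):
 *
 *      kvm_x86_ops->vcpu_load(vcpu, cpu);
 *      kvm_x86_ops->run(vcpu, kvm_run);
 */
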
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

enum emulation_result {
        EMULATE_DONE,       /* no further processing */
        EMULATE_DO_MMIO,    /* kvm_run filled with mmio request */
        EMULATE_FAIL,       /* can't emulate this instruction */
};

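/*
 * Editor's sketch of the usual caller pattern (the vendor exit handlers do
 * roughly this; error handling trimmed for illustration):
 *
 *      switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
 *      case EMULATE_DONE:    resume the guest;
 *      case EMULATE_DO_MMIO: return to userspace to complete the MMIO;
 *      case EMULATE_FAIL:    kvm_report_emulation_failure(vcpu, ...);
 *      }
 */
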
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code, int no_decode);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);

struct x86_emulate_ctxt;

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                           int size, unsigned long count, int down,
                           gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
                           u32 error_code);

void fx_init(struct kvm_vcpu *vcpu);

int emulator_read_std(unsigned long addr,
                      void *val,
                      unsigned int bytes,
                      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
                            const void *val,
                            unsigned int bytes,
                            struct kvm_vcpu *vcpu);

unsigned long segment_base(u16 selector);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page_private(page);
}

static inline u16 read_fs(void)
{
        u16 seg;
        asm("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
        asm("lldt %0" : : "rm"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
        asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm("str %0" : "=g"(tr));
        return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline void fx_save(struct i387_fxsave_struct *image)
{
        asm("fxsave (%0)" : : "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
        asm("fxrstor (%0)" : : "r" (image));
}

static inline void fpu_init(void)
{
        asm("finit");
}

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
        kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}

#define ASM_VMX_VMCLEAR_RAX       ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH          ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME          ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX       ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX    ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX   ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"

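/*
 * Editor's note: the ASM_VMX_* strings above are hand-encoded opcodes for
 * the VMX instructions, so the code assembles even with binutils versions
 * that do not know the vmxon/vmclear/... mnemonics.  vmx.c uses them
 * roughly like this (illustrative):
 *
 *      asm volatile (ASM_VMX_VMXON_RAX
 *                    : : "a"(&phys_addr), "m"(phys_addr)
 *                    : "memory", "cc");
 */
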
#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif