#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>

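/*
 * Page-table geometry and entry-bit constants shared by the MMU code.
 * 64-bit and PAE page tables use 9 index bits per level (512 eight-byte
 * entries per page); legacy 32-bit page tables use 10 bits (1024
 * four-byte entries).
 */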
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

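/*
 * Reclaim shadow pages if the number of free MMU pages has dropped
 * below the minimum the MMU needs to make progress.
 */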
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

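/*
 * Make sure a valid shadow root is in place before entering the guest.
 * Cheap in the common case: only calls into the MMU when the current
 * root is invalid.
 */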
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

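/*
 * Long mode is active iff the guest's EFER.LMA is set; never true on
 * 32-bit hosts.
 */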
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.shadow_efer & EFER_LMA;
#else
	return 0;
#endif
}

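/* Guest has CR4.PAE (Physical Address Extension) set. */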
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PAE;
}

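/* Guest has CR4.PSE set (4MB pages in legacy 32-bit paging). */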
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PSE;
}

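/* Guest has paging enabled (CR0.PG). */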
static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_PG;
}

#endif