#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>

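/* Entries per page-table page: 2^9 = 512 for 64-bit/PAE, 2^10 = 1024 for 32-bit */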
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

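/* Architecturally defined bits in x86 page-table entries */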
#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

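/* The PAT bit sits at bit 7 in 4K PTEs and at bit 12 in large-page directory entries */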
#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

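/* PSE-36: bits 13-16 of a 32-bit PDE carry physical address bits 32-35 of a 4MB page */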
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

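/* Number of paging levels: 4 for long mode, 3 for PAE, 2 for legacy 32-bit paging */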
#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

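/*
 * Reclaim shadow pages when the number of free MMU pages for this VM
 * drops below KVM_MIN_FREE_MMU_PAGES.
 */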
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
		__kvm_mmu_free_some_pages(vcpu);
}

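/*
 * Make sure a valid shadow root is in place before entering the guest;
 * rebuild it via kvm_mmu_load() if the root has been invalidated.
 */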
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

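/* Guest has long mode enabled (EFER.LME) in its shadowed EFER */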
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.shadow_efer & EFER_LME;
#else
	return 0;
#endif
}

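/* Guest has PAE paging enabled in CR4 */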
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PAE;
}

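/* Guest has page-size extensions (CR4.PSE) enabled */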
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PSE;
}

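/* Guest has paging enabled (CR0.PG) */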
static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_PG;
}

#endif
78#endif