#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

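/*
 * CR0/CR4 bits that hardware may let the guest own, i.e. modify
 * without triggering a VM exit.  Reads of any owned bit must first
 * sync the cached value back from hardware.
 */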
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

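/*
 * Guest registers are cached in vcpu->arch.regs and filled lazily:
 * if @reg has not been made available since the last VM exit, ask
 * the vendor code (VMX/SVM) to fetch it from hardware first.
 */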
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

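/*
 * Writes only touch the cache; marking the register dirty tells the
 * vendor code to flush the new value to hardware before guest reentry.
 */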
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

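/* Convenience wrappers for RIP, the most frequently accessed register. */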
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

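/*
 * The PAE page-directory pointers (PDPTRs) are cached like registers;
 * might_sleep() documents that refilling the cache may sleep on SVM.
 */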
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

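/*
 * Read the CR0 bits selected by @mask.  If any requested bit is
 * currently guest-owned, the cached value may be stale and has to
 * be decached from hardware first.
 */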
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

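/* CR4 counterpart of kvm_read_cr0_bits(). */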
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

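/*
 * CR3 is cached as an extra register; decache it from hardware if it
 * has not been read since the last VM exit.
 */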
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

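/*
 * Assemble the 64-bit EDX:EAX pair used by RDMSR/RDTSC-style
 * instructions; "& -1u" truncates each register to its low 32 bits.
 */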
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

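/* PKRU (protection-key rights) is read through the vendor code. */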
static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_pkru(vcpu);
}

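/*
 * Nested virtualization: HF_GUEST_MASK in hflags tracks whether the
 * vcpu is currently running a nested (L2) guest.
 */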
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

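/* True while the vcpu is in System Management Mode. */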
static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif