#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

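/*
 * Architectural power-on value of IA32_PAT: entries PA0..PA7 are
 * WB, WT, UC-, UC, WB, WT, UC-, UC (Intel SDM vol. 3).
 */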
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

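/*
 * #BP (INT3) and #OF (INTO) are raised by software interrupt
 * instructions, so injecting them needs an instruction length rather
 * than plain hardware-exception delivery.
 */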
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

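/* True when page-table walks go through the nested MMU, i.e. L2 is running. */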
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

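/*
 * Masking with 31 keeps the shift within a u32 even for CPUID-style
 * bit numbers >= 32, avoiding an undefined oversized shift.
 */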
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

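/*
 * Cache the translation of the last MMIO access so a repeated access to
 * the same page can skip the page-table walk. The cached entry is only
 * valid while the memslot generation is unchanged.
 */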
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

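/*
 * Outside 64-bit mode the architectural register width is 32 bits, so
 * the accessors below truncate the value accordingly.
 */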
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline u64 get_kernel_ns(void)
{
	return ktime_get_boot_ns();
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);

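/* XSAVE state components that KVM is able to expose to its guests. */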
#define KVM_SUPPORTED_XCR0	(XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
				| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
				| XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern struct static_key kvm_no_apic_vcpu;

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })
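
/*
 * Usage sketch (hypothetical values): the quotient must fit in 32 bits,
 * i.e. n < base, or the divl instruction raises #DE.
 *
 *	u32 n = 1000;
 *	u32 rem = do_shl32_div32(n, 3000);
 *	n now holds (1000ULL << 32) / 3000, rem holds the remainder.
 */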

#endif