/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"

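/*
 * Default knobs for pause-loop exiting (VMX) / pause filtering (SVM),
 * which throttle a vCPU that spins in a PAUSE loop.
 */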
#define KVM_DEFAULT_PLE_GAP             128
#define KVM_VMX_DEFAULT_PLE_WINDOW      4096
#define KVM_DEFAULT_PLE_WINDOW_GROW     2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK   0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX  UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX  USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW      3000

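/*
 * Grow/shrink the PLE window on each relevant exit.  A modifier of 0
 * resets the window to its base value; a modifier below the base is
 * applied multiplicatively, anything else additively.  The result is
 * clamped to the given max (when growing) or min (when shrinking).
 */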
static inline unsigned int __grow_ple_window(unsigned int val,
                unsigned int base, unsigned int modifier, unsigned int max)
{
        u64 ret = val;

        if (modifier < 1)
                return base;

        if (modifier < base)
                ret *= modifier;
        else
                ret += modifier;

        return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
                unsigned int base, unsigned int modifier, unsigned int min)
{
        if (modifier < 1)
                return base;

        if (modifier < base)
                val /= modifier;
        else
                val -= modifier;

        return max(val, min);
}

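/* Architectural power-on value of IA32_PAT: WB, WT, UC-, UC, repeated. */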
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL

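/*
 * A "pending" exception has been queued but not yet processed for
 * delivery; an "injected" one has already been written into the
 * VMCS/VMCB.  Clearing both forgets any queued exception.
 */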
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
        vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
                                       bool soft)
{
        vcpu->arch.interrupt.injected = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
                vcpu->arch.nmi_injected;
}

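/*
 * #BP and #OF are raised by INT3 and INTO, so they are "soft"
 * (software) exceptions: re-injecting them requires the length of the
 * triggering instruction (e.g. for VMX's instruction-length field).
 */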
static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return (vcpu->arch.efer & EFER_LMA) &&
                kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
        return 0;
#endif
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
        return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

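/*
 * An address is canonical when bits 63:N all equal bit N-1, where N is
 * the implemented virtual-address width: 48 bits with 4-level paging,
 * 57 bits when CR4.LA57 enables 5-level paging.  get_canonical()
 * reproduces the hardware's sign extension.
 */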
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
        return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
#else
        return false;
#endif
}

static inline bool emul_is_noncanonical_address(u64 la,
                struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
        return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
#else
        return false;
#endif
}

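/*
 * Cache the translation of the last emulated MMIO access so that the
 * emulation fast path can skip the page-table walk when the guest
 * touches the same page again.  The entry is only trusted while the
 * memslot generation it was recorded under is still current.
 */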
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        /*
         * If this is a shadow nested page table, the "GVA" is
         * actually a nGPA.
         */
        vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
              vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
              vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

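/*
 * Outside of 64-bit mode only the low 32 bits of a GPR are
 * architecturally visible, so register reads and writes are truncated
 * accordingly.
 */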
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        unsigned long val = kvm_register_read(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
                                       enum kvm_reg reg,
                                       unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
        return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
bool kvm_vector_hashing_enabled(void);

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
                                | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
                                | XFEATURE_MASK_PKRU)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern unsigned int lapic_timer_advance_ns;

extern bool enable_vmware_backdoor;

extern struct static_key kvm_no_apic_vcpu;

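/*
 * Convert a time in nanoseconds to guest TSC cycles, using the vcpu's
 * virtual TSC rate expressed as a pvclock mult/shift pair.
 */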
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
        return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
                                   vcpu->arch.virtual_tsc_shift);
}

/*
 * Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
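/*
 * Note that the 32-bit quotient must not overflow (i.e. n < base must
 * hold), or divl raises a divide error.  A minimal usage sketch with
 * illustrative values, converting a ratio into 0.32 fixed point:
 *
 *      u32 frac = 1;
 *      u32 rem = do_shl32_div32(frac, 3);
 *
 * afterwards frac == 0x55555555 (about 1/3 in 0.32 fixed point) and
 * rem == 1.
 */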
#define do_shl32_div32(n, base)                                 \
        ({                                                      \
            u32 __quot, __rem;                                  \
            asm("divl %2" : "=a" (__quot), "=d" (__rem)         \
                : "rm" (base), "0" (0), "1" ((u32) n));         \
            n = __quot;                                         \
            __rem;                                              \
         })

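/*
 * Flags accepted by the KVM_CAP_X86_DISABLE_EXITS capability:
 * userspace may ask KVM not to intercept MWAIT, HLT and PAUSE,
 * leaving them to execute natively in the guest.
 */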
#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
#define KVM_X86_DISABLE_EXITS_HLT            (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
                                              KVM_X86_DISABLE_EXITS_HLT | \
                                              KVM_X86_DISABLE_EXITS_PAUSE)

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
        return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
        return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
        return kvm->arch.pause_in_guest;
}

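/*
 * Track, per physical CPU, the vcpu whose guest context the CPU was in
 * when an interrupt arrived; consumers such as the perf guest-mode
 * callbacks use it to attribute events to the guest.
 */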
DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
        __this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
        __this_cpu_write(current_vcpu, NULL);
}

#endif