/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

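/*
 * Compute the size of the XSAVE area needed to hold the extended state
 * components in @xstate_bv, using the host's CPUID.0xD sub-leaves.  In the
 * standard format each component sits at the fixed offset reported in EBX;
 * in the compacted format components are packed back to back, so the running
 * size itself is the next component's offset.
 */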
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_x86_ops->mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

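/*
 * Refresh state that KVM caches from the guest CPUID entries: the OSXSAVE
 * bit, the LAPIC timer mode mask, the supported XCR0 mask and XSAVE area
 * size, eager-FPU mode, and the guest physical-address width.  Returns
 * -EINVAL if the reported virtual-address width is neither 0 nor 48, since
 * the canonical-address checks assume 48 bits.
 */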
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
	if (vcpu->arch.eager_fpu)
		kvm_x86_ops->fpu_activate(vcpu);

	/*
	 * The existing code assumes virtual address is 48-bit in the canonical
	 * address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
		((best->eax & 0xff00) >> 8) != 0)
		return -EINVAL;

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

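/*
 * If the host is running with EFER.NX clear, mask NX out of the guest's
 * 0x80000001 leaf as well, since the guest would not be able to use it.
 */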
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

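/*
 * Return the guest's physical-address width from CPUID.0x80000008:EAX[7:0],
 * falling back to 36 bits when the extended leaf is not provided.
 */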
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

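/*
 * Mask a guest-visible feature word against the corresponding host
 * capability word, so KVM never advertises a feature the boot CPU lacks.
 */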
static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

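/*
 * Build entries for KVM_GET_EMULATED_CPUID: features that KVM emulates in
 * software, and therefore can offer even when the host CPU lacks them.
 * Currently this is only MOVBE in leaf 1.
 */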
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 1;		/* only one leaf currently */
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}

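/*
 * Build one KVM_GET_SUPPORTED_CPUID entry (plus any dependent sub-leaves):
 * read the host's leaf with do_cpuid_1_ent(), then mask each register
 * against the feature bits KVM knows how to virtualize and patch in bits
 * that KVM provides itself (x2APIC, TSC_ADJUST, the KVM paravirt leaves).
 */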
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_supported_word5_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_supported_word9_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
		F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(PCOMMIT);

	/* cpuid 0xD.1.eax */
	const u32 kvm_supported_word10_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		cpuid_mask(&entry->edx, 0);
		entry->ecx &= kvm_supported_word4_x86_features;
		cpuid_mask(&entry->ecx, 4);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_supported_word9_x86_features;
			cpuid_mask(&entry->ebx, 9);
			// TSC_ADJUST is emulated
			entry->ebx |= F(TSC_ADJUST);
		} else
			entry->ebx = 0;
		entry->eax = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_supported_word10_x86_features;
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		cpuid_mask(&entry->edx, 1);
		entry->ecx &= kvm_supported_word6_x86_features;
		cpuid_mask(&entry->ecx, 6);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->ebx = entry->edx = 0;
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/*Add support for Centaur's CPUID instruction*/
	case 0xC0000000:
		/*Just support up to 0xC0000004 now*/
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_supported_word5_x86_features;
		cpuid_mask(&entry->edx, 5);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

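/*
 * Handle KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID: walk the
 * standard (0x0...), extended (0x80000000...), Centaur (0xC0000000..., only
 * on Centaur hosts) and KVM paravirt leaf ranges, building entries into a
 * temporary array that is then copied back to userspace.
 */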
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
				     &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

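/*
 * For stateful CPUID leaves (function 2 on some CPUs), successive reads
 * return successive entries.  Clear the READ_NEXT flag on the entry just
 * consumed and set it on the next entry with the same function, wrapping
 * around to the first one.
 */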
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

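/*
 * Core CPUID resolution for the guest: look up the vCPU's entry for
 * (function, index), falling back to check_cpuid_limit() for out-of-range
 * leaves, and return zeroes if nothing matches.  Leaf 0xa (architectural
 * PMU) is hidden while running a nested (L2) guest.
 */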
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best)
		best = check_cpuid_limit(vcpu, function, index);

	/*
	 * Perfmon not yet supported for L2 guest.
	 */
	if (is_guest_mode(vcpu) && function == 0xa)
		best = NULL;

	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, eax, ebx, ecx, edx;

	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);