#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);


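/*
 * The guest_cpuid_has_*() helpers test feature bits in the CPUID tables that
 * userspace installed via KVM_SET_CPUID{,2}, i.e. they answer "has this
 * feature been exposed to the guest?".  guest_cpuid_has_xsave() additionally
 * requires that the host itself supports XSAVE.
 */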
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return 0;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

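/*
 * Compare the guest's CPUID.0H:EBX against the "AuthenticAMD" vendor string
 * to decide whether the vCPU is advertised as an AMD CPU.
 */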
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}
#endif