#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

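/*
 * The guest_cpuid_has_*() helpers below report whether the guest's
 * CPUID, as configured by userspace via KVM_SET_CPUID{,2}, advertises
 * a given feature.  They say nothing about host support for the
 * feature; callers that need that must check it separately.
 */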
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_longmode(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_LM));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}

static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}

/*
 * NRIPS is provided through CPUID function 0x8000000a, EDX bit 3.
 */
#define BIT_NRIPS	3

static inline bool guest_cpuid_has_nrips(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x8000000a, 0);

	/*
	 * NRIPS is a scattered cpuid feature, so we can't use
	 * X86_FEATURE_NRIPS here (X86_FEATURE_NRIPS would be bit
	 * position 8, not 3).
	 */
	return best && (best->edx & bit(BIT_NRIPS));
}
#undef BIT_NRIPS

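/*
 * Guest family/model/stepping are decoded from the guest's CPUID leaf 1
 * EAX via the x86_family()/x86_model()/x86_stepping() helpers from
 * <asm/cpu.h>; each of the functions below returns -1 if the guest has
 * no leaf 1.
 */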
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

#endif