#include <linux/types.h>
#include "bitops.h"

#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"

struct cpu_features cpu;
u32 cpu_vendor[3];

static bool loaded_flags;

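/*
 * Probe for an x87 FPU: clear CR0.EM and CR0.TS if either is set so
 * that FPU instructions do not fault, execute fninit, and then check
 * that the status word reads back as zero and the exception-mask bits
 * of the control word read back as set, as they should after a reset.
 */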
static int has_fpu(void)
{
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

/*
 * For building the 16-bit code we want to explicitly specify 32-bit
 * push/pop operations, rather than just saying 'pushf' or 'popf' and
 * letting the compiler choose. But this is also included from the
 * compressed/ directory where it may be 64-bit code, and thus needs
 * to be 'pushfq' or 'popfq' in that case.
 */
#ifdef __x86_64__
#define PUSHF "pushfq"
#define POPF "popfq"
#else
#define PUSHF "pushfl"
#define POPF "popfl"
#endif

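/*
 * Test whether a given EFLAGS bit can be toggled: save the original
 * flags, push a copy with the mask bit flipped, pop it into EFLAGS,
 * read the flags back, and then restore the original value.  A bit
 * that sticks is writable; get_cpuflags() below uses this with
 * X86_EFLAGS_ID to detect whether the CPUID instruction is available.
 */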
int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile(PUSHF " \n\t"
		     PUSHF " \n\t"
		     "pop %0 \n\t"
		     "mov %0,%1 \n\t"
		     "xor %2,%1 \n\t"
		     "push %1 \n\t"
		     POPF " \n\t"
		     PUSHF " \n\t"
		     "pop %1 \n\t"
		     POPF
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0^f1) & mask);
}

/* Handle x86_32 PIC using ebx. */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

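/*
 * CPUID wrapper.  On 32-bit PIC builds %ebx holds the GOT pointer and
 * cannot be claimed as a fixed output register, so EBX_REG falls back
 * to "=r"; the .ifnc blocks then save %ebx before the cpuid and swap
 * it back afterwards so the EBX result still lands in *b while the
 * compiler's %ebx is preserved.
 */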
static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
		     "cpuid \n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
		     : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		     : "a" (id)
	);
}

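/*
 * Fill in the global cpu/cpu_vendor state: the FPU bit, the vendor
 * string from CPUID leaf 0, family/model and the standard feature
 * words from leaf 1, and the extended feature words from leaf
 * 0x80000001 when the extended range is present.  Runs only once;
 * later calls return immediately.
 */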
void get_cpuflags(void)
{
	u32 max_intel_level, max_amd_level;
	u32 tfms;
	u32 ignored;

	if (loaded_flags)
		return;
	loaded_flags = true;

	if (has_fpu())
		set_bit(X86_FEATURE_FPU, cpu.flags);

	if (has_eflag(X86_EFLAGS_ID)) {
		cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
		      &cpu_vendor[1]);

		if (max_intel_level >= 0x00000001 &&
		    max_intel_level <= 0x0000ffff) {
			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
			      &cpu.flags[0]);
			cpu.level = (tfms >> 8) & 15;
			cpu.model = (tfms >> 4) & 15;
			if (cpu.level >= 6)
				cpu.model += ((tfms >> 16) & 0xf) << 4;
		}

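		/*
		 * Extended range: leaf 0x80000000 reports the highest
		 * extended leaf, and leaf 0x80000001 supplies the
		 * AMD-defined feature words stored in cpu.flags[1]
		 * (EDX) and cpu.flags[6] (ECX).
		 */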
		cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
		      &ignored);

		if (max_amd_level >= 0x80000001 &&
		    max_amd_level <= 0x8000ffff) {
			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
			      &cpu.flags[1]);
		}
	}
}
119}