H. Peter Anvin | 1965aae | 2008-10-22 22:26:29 -0700 | [diff] [blame] | 1 | #ifndef _ASM_X86_MICROCODE_H |
| 2 | #define _ASM_X86_MICROCODE_H |
Dmitry Adamushko | d45de40 | 2008-08-20 00:22:26 +0200 | [diff] [blame] | 3 | |
/*
 * MSR accessors which go straight to the native_{read,write}_msr()
 * primitives, i.e. without any paravirt indirection.  The early
 * microcode loader runs before paravirt is set up, so it must use
 * these instead of the regular rdmsr/wrmsr wrappers.
 */
#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = native_read_msr((msr));		\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	native_write_msr(msr, low, high)

/* Write a 64-bit value as the low/high dword pair native_write_msr() wants. */
#define native_wrmsrl(msr, val)				\
	native_write_msr((msr),				\
			 (u32)((u64)(val)),		\
			 (u32)((u64)(val) >> 32))
| 18 | |
/*
 * Identification of a CPU as relevant for microcode matching:
 * signature, processor flags and the currently loaded revision.
 */
struct cpu_signature {
	unsigned int sig;	/* CPUID signature of the CPU */
	unsigned int pf;	/* processor flags (platform ID mask) */
	unsigned int rev;	/* microcode revision currently applied */
};
Peter Oruba | 8d86f39 | 2008-07-28 18:44:21 +0200 | [diff] [blame] | 24 | |
struct device;

/* Outcome of a microcode request/load attempt. */
enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };

/* True when the microcode loader is disabled — set in microcode_core.c. */
extern bool dis_ucode_ldr;
Dmitry Adamushko | 871b72d | 2009-05-11 23:48:27 +0200 | [diff] [blame] | 29 | |
/*
 * Vendor-specific driver callbacks, returned by
 * init_{intel,amd}_microcode() and invoked by the generic core.
 */
struct microcode_ops {
	/* Load a microcode image for @cpu from a user-supplied buffer. */
	enum ucode_state (*request_microcode_user) (int cpu,
				const void __user *buf, size_t size);

	/*
	 * Load microcode for @cpu via the firmware loader.
	 * NOTE(review): @refresh_fw presumably forces re-reading the image
	 * from the filesystem — confirm against the vendor drivers.
	 */
	enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
						  bool refresh_fw);

	/* Release any per-CPU microcode state held by the driver. */
	void (*microcode_fini_cpu) (int cpu);

	/*
	 * The generic 'microcode_core' part guarantees that
	 * the callbacks below run on a target cpu when they
	 * are being called.
	 * See also the "Synchronization" section in microcode_core.c.
	 */
	int (*apply_microcode) (int cpu);
	int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
};
| 48 | |
/* Per-CPU microcode bookkeeping maintained by the core. */
struct ucode_cpu_info {
	struct cpu_signature cpu_sig;	/* signature/flags/revision of this CPU */
	int valid;			/* nonzero when this entry holds usable data */
	void *mc;			/* vendor-specific microcode image, if any */
};
/* One entry per CPU, indexed by CPU number. */
extern struct ucode_cpu_info ucode_cpu_info[];
| 55 | |
/*
 * Vendor driver entry points.  Each returns the vendor's microcode_ops,
 * with inline NULL/no-op stubs when the driver is compiled out so that
 * callers need no #ifdefs.
 */
#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
#else
static inline struct microcode_ops * __init init_intel_microcode(void)
{
	return NULL;
}
#endif /* CONFIG_MICROCODE_INTEL */

#ifdef CONFIG_MICROCODE_AMD
extern struct microcode_ops * __init init_amd_microcode(void);
extern void __exit exit_amd_microcode(void);
#else
static inline struct microcode_ops * __init init_amd_microcode(void)
{
	return NULL;
}
static inline void __exit exit_amd_microcode(void) {}
#endif
| 75 | |
#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128

/* Pack four characters into a dword, in the byte order CPUID(0) returns. */
#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
/* "GenuineIntel" as the three CPUID(0) vendor-string registers (EBX,EDX,ECX). */
#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
/* "AuthenticAMD", likewise split across EBX,EDX,ECX. */
#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')

/*
 * True iff registers (ebx, edx, ecx) spell the vendor string (a, b, c):
 * each XOR is zero only on an exact match, and OR-ing the three lets a
 * single test cover all of them.
 */
#define CPUID_IS(a, b, c, ebx, ecx, edx)	\
	(!((ebx ^ (a))|(edx ^ (b))|(ecx ^ (c))))
| 89 | |
| 90 | /* |
| 91 | * In early loading microcode phase on BSP, boot_cpu_data is not set up yet. |
| 92 | * x86_vendor() gets vendor id for BSP. |
| 93 | * |
| 94 | * In 32 bit AP case, accessing boot_cpu_data needs linear address. To simplify |
| 95 | * coding, we still use x86_vendor() to get vendor id for AP. |
| 96 | * |
| 97 | * x86_vendor() gets vendor information directly from CPUID. |
| 98 | */ |
| 99 | static inline int x86_vendor(void) |
| 100 | { |
| 101 | u32 eax = 0x00000000; |
| 102 | u32 ebx, ecx = 0, edx; |
| 103 | |
| 104 | native_cpuid(&eax, &ebx, &ecx, &edx); |
| 105 | |
| 106 | if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx)) |
| 107 | return X86_VENDOR_INTEL; |
| 108 | |
| 109 | if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) |
| 110 | return X86_VENDOR_AMD; |
| 111 | |
| 112 | return X86_VENDOR_UNKNOWN; |
| 113 | } |
| 114 | |
/*
 * Extract the CPU family from a CPUID(1)-style signature dword.
 * Family 0xf parts encode their real family as base plus the
 * extended-family field.
 */
static inline unsigned int __x86_family(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;

	return (fam == 0xf) ? fam + ((sig >> 20) & 0xff) : fam;
}
| 126 | |
| 127 | static inline unsigned int x86_family(void) |
| 128 | { |
| 129 | u32 eax = 0x00000001; |
| 130 | u32 ebx, ecx = 0, edx; |
| 131 | |
| 132 | native_cpuid(&eax, &ebx, &ecx, &edx); |
| 133 | |
| 134 | return __x86_family(eax); |
| 135 | } |
| 136 | |
/*
 * Extract the model number from a CPUID(1)-style signature dword,
 * folding in the extended-model field for family 0x6 and 0xf parts.
 */
static inline unsigned int x86_model(unsigned int sig)
{
	unsigned int fam = (sig >> 8) & 0xf;
	unsigned int model = (sig >> 4) & 0xf;

	/* Inlined __x86_family(): family 0xf adds the extended family. */
	if (fam == 0xf)
		fam += (sig >> 20) & 0xff;

	if (fam == 0x6 || fam == 0xf)
		model += ((sig >> 16) & 0xf) << 4;

	return model;
}
| 150 | |
/* Early-loading entry points; no-op stubs when CONFIG_MICROCODE_EARLY=n. */
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
/*
 * NOTE(review): presumably preserves the initrd-embedded microcode before
 * the initrd memory is freed — confirm in the vendor early loaders.
 */
extern int __init save_microcode_in_initrd(void);
/* Re-apply the previously saved early microcode (e.g. after resume). */
void reload_early_microcode(void);
#else
static inline void __init load_ucode_bsp(void) {}
static inline void load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
	return 0;
}
static inline void reload_early_microcode(void) {}
#endif
| 164 | |
H. Peter Anvin | 1965aae | 2008-10-22 22:26:29 -0700 | [diff] [blame] | 165 | #endif /* _ASM_X86_MICROCODE_H */ |