/*
 * Copyright (C) 1994  Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

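/*
 * Runs once on the boot CPU: identify the CPU, then select and enable the
 * speculation mitigations before alternatives are patched.
 */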
void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems to be
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

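/*
 * Switch the SPEC_CTRL and virtual SPEC_CTRL MSR values between host and
 * guest: setguest == true loads the guest values, setguest == false restores
 * the host values.
 */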
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculative_store_bypass_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

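/*
 * Engage the AMD Speculative Store Bypass mitigation, preferring the
 * virtualized interface (VIRT_SPEC_CTRL) over the family-specific LS_CFG MSR.
 */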
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] = {
	{ "off",               SPECTRE_V2_CMD_NONE,              false },
	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

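/*
 * Map the spectre_v2= and nospectre_v2 kernel command line options to a
 * mitigation command, falling back to AUTO on unknown or invalid input.
 */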
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret, i;
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;
	else {
		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
		if (ret < 0)
			return SPECTRE_V2_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
			if (!match_option(arg, ret, mitigation_options[i].option))
				continue;
			cmd = mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPECTRE_V2_CMD_AUTO;
		}
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	if (mitigation_options[i].secure)
		spec2_print_if_secure(mitigation_options[i].option);
	else
		spec2_print_if_insecure(mitigation_options[i].option);

	return cmd;
}

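/*
 * Pick and enable the Spectre v2 mitigation based on the command line,
 * CONFIG_RETPOLINE and the CPU's capabilities.
 */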
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
	}

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. But firmware isn't, so use IBRS to protect that.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

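/*
 * Map the spec_store_bypass_disable= and nospec_store_bypass_disable kernel
 * command line options to a mitigation command, defaulting to AUTO.
 */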
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

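/*
 * Select the Speculative Store Bypass mitigation mode and, for the "on"
 * mode, program the vendor-specific mechanism that disables the bypass.
 */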
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
		 * a completely different MSR and bit dependent on family.
		 */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			break;
		case X86_VENDOR_AMD:
			x86_amd_ssb_disable();
			break;
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

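/*
 * Apply a PR_SPEC_* Speculative Store Bypass request to @task, updating the
 * CPU state immediately when the task is current.
 */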
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	bool update;

	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	default:
		return -ERANGE;
	}

	/*
	 * If being set on non-current task, delay setting the CPU
	 * mitigation until it is next scheduled.
	 */
	if (task == current && update)
		speculative_store_bypass_update_current();

	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

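/*
 * Called when a secondary CPU is brought up so that it inherits the boot
 * CPU's speculation control settings.
 */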
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

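/*
 * Select the L1TF mitigation: disable SMT when the chosen mode requires it,
 * and enable PTE inversion unless the kernel lacks PAE or physical memory
 * exceeds MAX_PA/2.
 */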
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	/*
	 * This is extremely unlikely to happen because almost all systems
	 * have far more MAX_PA/2 address space than RAM that can be fit
	 * into the DIMM slots.
	 */
	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

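/* Parse the l1tf= early command line option. */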
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char *l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     cpu_smt_control == CPU_SMT_ENABLED))
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

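/*
 * Common backend for the cpu_show_*() sysfs handlers below; formats the
 * mitigation status string for the given X86_BUG_* entry.
 */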
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;
	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif