/*
 * Copyright (C) 1994 Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/hypervisor.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
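/*
 * Illustrative sketch of how these keys are meant to be consumed (the
 * call-site names are assumptions; the callers live outside this file):
 *
 *	if (static_branch_likely(&mds_user_clear))
 *		mds_clear_cpu_buffers();
 *
 * so a disabled mitigation costs no more than a patched-out jump.
 */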

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
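/*
 * Hypervisor code is expected to call x86_virt_spec_ctrl() on both sides of
 * a guest run: with setguest == true before entering the guest and with
 * setguest == false after leaving it (typically through the
 * x86_spec_ctrl_set_guest()/x86_spec_ctrl_restore_host() wrappers in
 * <asm/spec-ctrl.h>; the wrapper names are an assumption of this note).
 */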

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

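/*
 * Boot-time selection, e.g. "mds=off", "mds=full" or "mds=full,nosmt";
 * the last form also disables SMT on affected CPUs.
 */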
static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_PRCTL]		= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]	= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
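
/*
 * These are the "spectre_v2_user=" boot options, e.g.
 * "spectre_v2_user=prctl,ibpb" selects per-task STIBP via prctl() combined
 * with an always-on IBPB on context switch.
 */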

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled no STIBP required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available, clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};
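
/*
 * These are the "spectre_v2=" boot options, e.g. "spectre_v2=retpoline,generic"
 * forces the compiler-based generic retpoline and "spectre_v2=off" disables
 * the mitigation entirely.
 */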

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"

void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
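
/*
 * arch_smt_update() is called from check_bugs() above and is also expected
 * to be invoked by the generic CPU hotplug code whenever the SMT enable
 * state changes (that caller lives outside this file and is mentioned here
 * only for orientation).
 */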

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
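
/*
 * These are the "spec_store_bypass_disable=" boot options, e.g.
 * "spec_store_bypass_disable=seccomp" selects the prctl interface and also
 * disables speculative store bypass for seccomp-confined tasks.
 */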

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
			half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
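
/*
 * Boot-time selection, e.g. "l1tf=off" disables the mitigation while
 * "l1tf=flush,nosmt" keeps the default L1D flush behaviour but additionally
 * disables SMT.
 */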

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

static ssize_t mds_show_state(char *buf)
{
#ifdef CONFIG_HYPERVISOR_GUEST
	if (x86_hyper) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}
#endif

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
				sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
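
/*
 * The cpu_show_*() hooks below back the files under
 * /sys/devices/system/cpu/vulnerabilities/. Reading "spectre_v2" there, for
 * example, returns a line built by cpu_show_common() such as
 * "Mitigation: Full generic retpoline, IBPB: conditional, IBRS_FW,
 * STIBP: conditional, RSB filling" (the exact contents depend on the CPU
 * and the selected mitigations).
 */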

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
#endif