// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/seq_buf.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/debug.h>
#include <asm/security_features.h>
#include <asm/setup.h>

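/*
 * Bitmask of SEC_FTR_* flags describing this system's vulnerability and
 * mitigation state. Platform code populates it early in boot (e.g. from
 * hypervisor or firmware advice) before the setup_*() routines below read it.
 */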
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
static bool link_stack_flush_enabled;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

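/*
 * do_barrier_nospec_fixups() patches every barrier_nospec site in the
 * kernel with either the speculation barrier instruction or a nop, so
 * toggling takes effect without a reboot.
 */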
static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}

void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/QEMU) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec)
		enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);

#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
	switch (val) {
	case 0:
	case 1:
		break;
	default:
		return -EINVAL;
	}

	if (!!val == !!barrier_nospec_enabled)
		return 0;

	enable_barrier_nospec(!!val);

	return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
	*val = barrier_nospec_enabled ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);
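
/*
 * The file is created under powerpc_debugfs_root, i.e. the "powerpc"
 * directory of debugfs, so with debugfs mounted in the usual place the
 * barrier can be toggled at runtime:
 *
 *   echo 0 > /sys/kernel/debug/powerpc/barrier_nospec
 *   cat /sys/kernel/debug/powerpc/barrier_nospec
 */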
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
	no_spectrev2 = true;

	return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
void setup_spectre_v2(void)
{
	if (no_spectrev2)
		do_btb_flush_fixups();
	else
		btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */
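
/*
 * When "nospectre_v2" is given on an FSL Book3E system, do_btb_flush_fixups()
 * above patches the branch target buffer flush sequences out to nops;
 * otherwise the flush stays in place and btb_flush_enabled reports it.
 */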

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv;

	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush) {
		struct seq_buf s;

		seq_buf_init(&s, buf, PAGE_SIZE - 1);

		seq_buf_printf(&s, "Mitigation: RFI Flush");
		if (thread_priv)
			seq_buf_printf(&s, ", L1D private per thread");

		seq_buf_printf(&s, "\n");

		return s.len;
	}

	if (thread_priv)
		return sprintf(buf, "Vulnerable: L1D private per thread\n");

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_meltdown(dev, attr, buf);
}
#endif

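/*
 * The cpu_show_*() handlers in this file back the generic sysfs files under
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, l1tf, spectre_v1,
 * spectre_v2, spec_store_bypass).
 */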
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
		if (barrier_nospec_enabled)
			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
		else
			seq_buf_printf(&s, "Vulnerable");

		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
			seq_buf_printf(&s, ", ori31 speculation barrier enabled");

		seq_buf_printf(&s, "\n");
	} else
		seq_buf_printf(&s, "Not affected\n");

	return s.len;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct seq_buf s;
	bool bcs, ccd;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);

	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

	if (bcs || ccd) {
		seq_buf_printf(&s, "Mitigation: ");

		if (bcs)
			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

		if (bcs && ccd)
			seq_buf_printf(&s, ", ");

		if (ccd)
			seq_buf_printf(&s, "Indirect branch cache disabled");

		if (link_stack_flush_enabled)
			seq_buf_printf(&s, ", Software link stack flush");

	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
		seq_buf_printf(&s, "Mitigation: Software count cache flush");

		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
			seq_buf_printf(&s, " (hardware accelerated)");

		if (link_stack_flush_enabled)
			seq_buf_printf(&s, ", Software link stack flush");

	} else if (btb_flush_enabled) {
		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
	} else {
		seq_buf_printf(&s, "Vulnerable");
	}

	seq_buf_printf(&s, "\n");

	return s.len;
}
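
/*
 * Example outputs, depending on which features were detected:
 *   "Mitigation: Indirect branch cache disabled, Software link stack flush"
 *   "Mitigation: Software count cache flush (hardware accelerated)"
 */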

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.\n");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
		/* Until firmware tells us, we have the barrier with auto */
		return 0;
	} else if (strncmp(p, "off", 3) == 0) {
		handle_no_stf_barrier(NULL);
		return 0;
	} else
		return 1;
}
266early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);

static void stf_barrier_enable(bool enable)
{
	if (enable)
		do_stf_barrier_fixups(stf_enabled_flush_types);
	else
		do_stf_barrier_fixups(STF_BARRIER_NONE);

	stf_barrier = enable;
}

void setup_stf_barrier(void)
{
	enum stf_barrier_type type;
	bool enable, hv;

	hv = cpu_has_feature(CPU_FTR_HVMODE);

	/* Default to fallback in case fw-features are not available */
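	/* ARCH_300 is ISA v3.0 (POWER9), ARCH_207S is ISA v2.07 (POWER8),
	 * ARCH_206 is ISA v2.06 (POWER7). */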
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		type = STF_BARRIER_EIEIO;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		type = STF_BARRIER_SYNC_ORI;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		type = STF_BARRIER_FALLBACK;
	else
		type = STF_BARRIER_NONE;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
		  (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));

	if (type == STF_BARRIER_FALLBACK) {
		pr_info("stf-barrier: fallback barrier available\n");
	} else if (type == STF_BARRIER_SYNC_ORI) {
		pr_info("stf-barrier: hwsync barrier available\n");
	} else if (type == STF_BARRIER_EIEIO) {
		pr_info("stf-barrier: eieio barrier available\n");
	}

	stf_enabled_flush_types = type;

	if (!no_stf_barrier)
		stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
		const char *type;

		switch (stf_enabled_flush_types) {
		case STF_BARRIER_EIEIO:
			type = "eieio";
			break;
		case STF_BARRIER_SYNC_ORI:
			type = "hwsync";
			break;
		case STF_BARRIER_FALLBACK:
			type = "fallback";
			break;
		default:
			type = "unknown";
		}

		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
	}

	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}

#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != stf_barrier)
		stf_barrier_enable(enable);

	return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
	*val = stf_barrier ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

static void no_count_cache_flush(void)
{
	count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
	pr_info("count-cache-flush: software flush disabled.\n");
}

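/*
 * Patch the count cache and link stack flush call sites to match the
 * security features. The end state is one of: no flush, the full software
 * flush sequence, or the hardware assisted flush, each optionally combined
 * with a software link stack flush.
 */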
static void toggle_count_cache_flush(bool enable)
{
	if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
	    !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
		enable = false;

	if (!enable) {
		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
#endif
		pr_info("link-stack-flush: software flush disabled.\n");
		link_stack_flush_enabled = false;
		no_count_cache_flush();
		return;
	}

	// This enables the branch from _switch to flush_count_cache
	patch_branch_site(&patch__call_flush_count_cache,
			  (u64)&flush_count_cache, BRANCH_SET_LINK);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	// This enables the branch from guest_exit_cont to kvm_flush_link_stack
	patch_branch_site(&patch__call_kvm_flush_link_stack,
			  (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
#endif

	pr_info("link-stack-flush: software flush enabled.\n");
	link_stack_flush_enabled = true;

	// If we just need to flush the link stack, patch an early return
	if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
		no_count_cache_flush();
		return;
	}

	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
		pr_info("count-cache-flush: full software flush sequence enabled.\n");
		return;
	}

	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
	pr_info("count-cache-flush: hardware assisted flush sequence enabled.\n");
}

void setup_count_cache_flush(void)
{
	bool enable = true;

	if (no_spectrev2 || cpu_mitigations_off()) {
		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

		enable = false;
	}

	/*
	 * There's no firmware feature flag/hypervisor bit to tell us we need to
	 * flush the link stack on context switch. So we set it here if we see
	 * either of the Spectre v2 mitigations that aim to protect userspace.
	 */
	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

	toggle_count_cache_flush(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	toggle_count_cache_flush(enable);

	return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
		*val = 0;
	else
		*val = 1;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
			    NULL, &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */