#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/spec-ctrl.h>

#include "process.h"

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(struct tss_struct, cpu_tss) = {
        .x86_tss = {
                .sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
        },
#ifdef CONFIG_X86_32
        /*
         * Note that the .io_bitmap member must be extra-big. This is because
         * the CPU will access an additional byte beyond the end of the IO
         * permission bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
        .SYSENTER_stack_canary = STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
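
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants to know when the CPU enters or leaves idle can hook the notifier
 * chain above.  The callback and notifier_block names below are hypothetical.
 *
 *      static int my_idle_notify(struct notifier_block *nb,
 *                                unsigned long action, void *unused)
 *      {
 *              if (action == IDLE_START)
 *                      ;       // CPU is about to go idle
 *              else if (action == IDLE_END)
 *                      ;       // CPU left idle
 *              return NOTIFY_OK;
 *      }
 *      static struct notifier_block my_idle_nb = {
 *              .notifier_call = my_idle_notify,
 *      };
 *      ...
 *      idle_notifier_register(&my_idle_nb);
 */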

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
        dst->thread.vm86 = NULL;
#endif

        return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;
        unsigned long *bp = t->io_bitmap_ptr;
        struct fpu *fpu = &t->fpu;

        if (bp) {
                struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        free_vm86(t);

        fpu__drop(fpu);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

        fpu__clear(&tsk->thread.fpu);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                cr4_set_bits(X86_CR4_TSD);
        preempt_enable();
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                cr4_clear_bits(X86_CR4_TSD);
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
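
/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctls.
 * A rough sketch of the userspace side (illustrative only, not part of
 * this file):
 *
 *      int mode;
 *
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);      // RDTSC now raises SIGSEGV
 *      prctl(PR_GET_TSC, &mode);               // mode == PR_TSC_SIGSEGV
 *      prctl(PR_SET_TSC, PR_TSC_ENABLE);       // RDTSC allowed again
 */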

static inline void switch_to_bitmap(struct thread_struct *prev,
                                    struct thread_struct *next,
                                    unsigned long tifp, unsigned long tifn)
{
        struct tss_struct *tss = this_cpu_ptr(&cpu_tss);

        if (tifn & _TIF_IO_BITMAP) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (tifp & _TIF_IO_BITMAP) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
}
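
/*
 * Note: the per-task io_bitmap_ptr copied above only exists for tasks that
 * changed their I/O port permissions via the ioperm(2) syscall, e.g.
 * (userspace sketch, hypothetical port range):
 *
 *      ioperm(0x378, 3, 1);    // allow in/out on ports 0x378-0x37a
 *
 * which sets TIF_IO_BITMAP and allocates thread.io_bitmap_ptr;
 * exit_thread() above frees it again.
 */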

#ifdef CONFIG_SMP

struct ssb_state {
        struct ssb_state        *shared_state;
        raw_spinlock_t          lock;
        unsigned int            disable_state;
        unsigned long           local_state;
};

#define LSTATE_SSB      0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
        struct ssb_state *st = this_cpu_ptr(&ssb_state);
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        st->local_state = 0;

        /*
         * Shared state setup happens once on the first bringup
         * of the CPU. It's not destroyed on CPU hotunplug.
         */
        if (st->shared_state)
                return;

        raw_spin_lock_init(&st->lock);

        /*
         * Go over HT siblings and check whether one of them has set up the
         * shared state pointer already.
         */
        for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
                if (cpu == this_cpu)
                        continue;

                if (!per_cpu(ssb_state, cpu).shared_state)
                        continue;

                /* Link it to the state of the sibling: */
                st->shared_state = per_cpu(ssb_state, cpu).shared_state;
                return;
        }

        /*
         * First HT sibling to come up on the core. Link shared state of
         * the first HT sibling to itself. The siblings on the same core
         * which come up later will see the shared state pointer and link
         * themselves to the state of this CPU.
         */
        st->shared_state = st;
}

/*
 * The logic is: the first HT sibling to enable SSBD enables it for both
 * siblings in the core, and the last sibling to disable it disables it for
 * the whole core. This is how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
        struct ssb_state *st = this_cpu_ptr(&ssb_state);
        u64 msr = x86_amd_ls_cfg_base;

        if (!static_cpu_has(X86_FEATURE_ZEN)) {
                msr |= ssbd_tif_to_amd_ls_cfg(tifn);
                wrmsrl(MSR_AMD64_LS_CFG, msr);
                return;
        }

        if (tifn & _TIF_SSBD) {
                /*
                 * Since this can race with prctl(), block reentry on the
                 * same CPU.
                 */
                if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
                        return;

                msr |= x86_amd_ls_cfg_ssbd_mask;

                raw_spin_lock(&st->shared_state->lock);
                /* First sibling enables SSBD: */
                if (!st->shared_state->disable_state)
                        wrmsrl(MSR_AMD64_LS_CFG, msr);
                st->shared_state->disable_state++;
                raw_spin_unlock(&st->shared_state->lock);
        } else {
                if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
                        return;

                raw_spin_lock(&st->shared_state->lock);
                st->shared_state->disable_state--;
                if (!st->shared_state->disable_state)
                        wrmsrl(MSR_AMD64_LS_CFG, msr);
                raw_spin_unlock(&st->shared_state->lock);
        }
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
        u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

        wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif
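
/*
 * Worked example of the refcounting above (derived from the code, shown for
 * illustration only).  Two HT siblings T0 and T1 share one disable_state:
 *
 *      T0 enables SSBD:  disable_state 0 -> 1, MSR written with SSBD set
 *      T1 enables SSBD:  disable_state 1 -> 2, no MSR write needed
 *      T0 disables SSBD: disable_state 2 -> 1, no MSR write, T1 stays protected
 *      T1 disables SSBD: disable_state 1 -> 0, MSR written with SSBD cleared
 */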

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
        /*
         * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
         * so ssbd_tif_to_spec_ctrl() just works.
         */
        wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
                                                      unsigned long tifn)
{
        unsigned long tif_diff = tifp ^ tifn;
        u64 msr = x86_spec_ctrl_base;
        bool updmsr = false;

        /*
         * If TIF_SSBD is different, select the proper mitigation
         * method. Note that if SSBD mitigation is disabled or permanently
         * enabled this branch can't be taken because nothing can set
         * TIF_SSBD.
         */
        if (tif_diff & _TIF_SSBD) {
                if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
                        amd_set_ssb_virt_state(tifn);
                } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
                        amd_set_core_ssb_state(tifn);
                } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                           static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        msr |= ssbd_tif_to_spec_ctrl(tifn);
                        updmsr = true;
                }
        }

        /*
         * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
         * otherwise avoid the MSR write.
         */
        if (IS_ENABLED(CONFIG_SMP) &&
            static_branch_unlikely(&switch_to_cond_stibp)) {
                updmsr |= !!(tif_diff & _TIF_SPEC_IB);
                msr |= stibp_tif_to_spec_ctrl(tifn);
        }

        if (updmsr)
                wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
        if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
                if (task_spec_ssb_disable(tsk))
                        set_tsk_thread_flag(tsk, TIF_SSBD);
                else
                        clear_tsk_thread_flag(tsk, TIF_SSBD);

                if (task_spec_ib_disable(tsk))
                        set_tsk_thread_flag(tsk, TIF_SPEC_IB);
                else
                        clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
        }
        /* Return the updated thread_info flags. */
        return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
        /* Forced update. Make sure all relevant TIF flags are different. */
        preempt_disable();
        __speculation_ctrl_update(~tif, tif);
        preempt_enable();
}

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
        preempt_disable();
        speculation_ctrl_update(speculation_ctrl_update_tif(current));
        preempt_enable();
}
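
/*
 * speculation_ctrl_update_current() is reached via the speculation prctls
 * (and the equivalent seccomp path).  A rough userspace sketch, for
 * illustration only:
 *
 *      // Opt this task out of Speculative Store Bypass:
 *      prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *            PR_SPEC_DISABLE, 0, 0);
 *
 *      // Query the current state:
 *      prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * The prctl side marks the task with TIF_SPEC_FORCE_UPDATE and, for the
 * current task, calls speculation_ctrl_update_current() so the MSRs get
 * refreshed here.
 */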

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev, *next;
        unsigned long tifp, tifn;

        prev = &prev_p->thread;
        next = &next_p->thread;

        tifn = READ_ONCE(task_thread_info(next_p)->flags);
        tifp = READ_ONCE(task_thread_info(prev_p)->flags);
        switch_to_bitmap(prev, next, tifp, tifn);

        propagate_user_return_notify(prev_p, next_p);

        if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
            arch_has_block_step()) {
                unsigned long debugctl, msk;

                rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
                debugctl &= ~DEBUGCTLMSR_BTF;
                msk = tifn & _TIF_BLOCKSTEP;
                debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
                wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
        }

        if ((tifp ^ tifn) & _TIF_NOTSC)
                cr4_toggle_bits(X86_CR4_TSD);

        if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
                __speculation_ctrl_update(tifp, tifn);
        } else {
                speculation_ctrl_update_tif(prev_p);
                tifn = speculation_ctrl_update_tif(next_p);

                /* Enforce MSR update to ensure consistent state */
                __speculation_ctrl_update(~tifn, tifn);
        }
}
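
/*
 * Note (assumption based on the "process.h" header included above):
 * __switch_to_xtra() is only invoked when the relevant TIF work bits of
 * prev/next actually differ, so the common context switch path stays free
 * of the bitmap copies and MSR accesses done here.
 */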

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
        this_cpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}
#endif

void arch_cpu_idle_enter(void)
{
        local_touch_nmi();
        enter_idle();
}

void arch_cpu_idle_exit(void)
{
        __exit_idle();
}

void arch_cpu_idle_dead(void)
{
        play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
        x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        safe_halt();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
        bool ret = !!x86_idle;

        x86_idle = default_idle;

        return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

        for (;;)
                halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
        if (amd_e400_c1e_mask != NULL)
                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it the
 * same way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
        if (!amd_e400_c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        amd_e400_c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        pr_info("System has AMD C1E enabled\n");
                }
        }

        if (amd_e400_c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
                        /* Force broadcast so ACPI can not interfere. */
                        tick_broadcast_force();
                        pr_info("Switch to broadcast mode on CPU%d\n", cpu);
                }
                tick_broadcast_enter();

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                tick_broadcast_exit();
                local_irq_enable();
        } else
                default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return 0;

        if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
                return 0;

        return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
        if (!current_set_polling_and_test()) {
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
                        mb(); /* quirk */
                }

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
        __current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
                pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
        if (x86_idle || boot_option_idle_override == IDLE_POLL)
                return;

        if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
                pr_info("using AMD E400 aware idle routine\n");
                x86_idle = amd_e400_idle;
        } else if (prefer_mwait_c1_over_halt(c)) {
                pr_info("using mwait in idle threads\n");
                x86_idle = mwait_idle;
        } else
                x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
        if (x86_idle == amd_e400_idle)
                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads\n");
                boot_option_idle_override = IDLE_POLL;
                cpu_idle_poll_ctrl(true);
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option idle=halt is used, halt is forced
                 * to be used for CPU idle. In that case CPU C2/C3 states
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * boot_option_idle_override.
                 */
                x86_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option idle=nomwait is used, it means that
                 * mwait will be disabled for CPU C2/C3 states. In that
                 * case it won't touch the variable
                 * boot_option_idle_override.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);
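
/*
 * Summary of the "idle=" kernel command line parameter handled above
 * (examples, derived from the code):
 *
 *      idle=poll       - busy-poll in idle, never HLT/MWAIT (highest power use)
 *      idle=halt       - always use HLT for the default idle routine
 *      idle=nomwait    - don't use MWAIT for the default C1 idle
 */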

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        return randomize_page(mm->brk, 0x02000000);
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct inactive_task_frame *frame =
                (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
        return READ_ONCE_NOCHECK(frame->ret_addr);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long start, bottom, top, sp, fp, ip, ret = 0;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        if (!try_get_task_stack(p))
                return 0;

        start = (unsigned long)task_stack_page(p);
        if (!start)
                goto out;

        /*
         * Layout of the stack page:
         *
         * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
         * PADDING
         * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
         * stack
         * ----------- bottom = start
         *
         * The task's stack pointer points at the location where the
         * framepointer is stored. The data on the stack is:
         * ... IP FP ... IP FP
         *
         * We need to read FP and IP, so we need to adjust the upper
         * bound by another unsigned long.
         */
        top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
        top -= 2 * sizeof(unsigned long);
        bottom = start;

        sp = READ_ONCE(p->thread.sp);
        if (sp < bottom || sp > top)
                goto out;

        fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
        do {
                if (fp < bottom || fp > top)
                        goto out;
                ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
                if (!in_sched_functions(ip)) {
                        ret = ip;
                        goto out;
                }
                fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
        } while (count++ < 16 && p->state != TASK_RUNNING);

out:
        put_task_stack(p);
        return ret;
}