/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);

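/*
 * Implementation note: in this era of the tree, __switch_to_asm() is
 * implemented in arch/x86/entry/entry_64.S (entry_32.S on 32-bit),
 * __switch_to() in arch/x86/kernel/process_64.c (process_32.c), and
 * __switch_to_xtra() in arch/x86/kernel/process.c.
 */
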
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic. Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed. This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}

asmlinkage void ret_from_fork(void);

/*
 * This is the structure pointed to by thread.sp for an inactive task. The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together. They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};

struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};

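/*
 * A sketch (simplified, not verbatim) of how copy_thread() in
 * arch/x86/kernel/process_64.c lays out a new task's stack as a fork_frame,
 * so that the child's first __switch_to_asm() pops the callee-saved
 * registers and "returns" into ret_from_fork():
 *
 *	fork_frame = container_of(task_pt_regs(p), struct fork_frame, regs);
 *	frame = &fork_frame->frame;
 *	frame->bp = 0;
 *	frame->ret_addr = (unsigned long) ret_from_fork;
 *	p->thread.sp = (unsigned long) fork_frame;
 */
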
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)

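/*
 * Usage sketch, simplified from context_switch() in kernel/sched/core.c:
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 *
 * Execution resumes past switch_to() only when this task is scheduled back
 * in, possibly much later and on a different CPU. By then the locals saved
 * on this stack are stale, so __switch_to_asm() returns the task it
 * actually switched away from, and 'last' captures it for
 * finish_task_switch().
 */
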
#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
		return;

	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif

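/*
 * refresh_sysenter_cs() exists because thread->sysenter_cs is per-task:
 * vm86 mode disables SYSENTER by zeroing it and restores it on exit. The
 * early return skips the (slow) WRMSR whenever the cached TSS value
 * already matches. It is called from __switch_to() in
 * arch/x86/kernel/process_32.c.
 */
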
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	load_sp0(task->thread.sp0);
#else
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif
}

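/*
 * Design note: on 64-bit the entry trampoline stack means sp0 never needs
 * to change, so update_sp0() is normally a no-op there; Xen PV is the
 * exception, since it does not use the entry trampoline and needs sp0 to
 * track the task's real stack top.
 */
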
#endif /* _ASM_X86_SWITCH_TO_H */