/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/kaiser.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	SWITCH_KERNEL_CR3_NO_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
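	/*
	 * The frame built above mirrors struct pt_regs: only the iret frame
	 * and the caller-clobbered registers are filled in; bp, bx and
	 * r12-r15 are left uninitialized because the fast path never reads
	 * them.  pt_regs->ax is preloaded with -ENOSYS so an out-of-range
	 * syscall number can skip the call below and still return the
	 * right error.
	 */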

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall.  If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	RESTORE_C_REGS_EXCEPT_RCX_R11
	/*
	 * This opens a window where we have a user CR3, but are
	 * running in the kernel.  This makes using the CS
	 * register useless for telling whether or not we need to
	 * switch CR3 in NMIs.  Normal interrupts are OK because
	 * they are off here.
	 */
	SWITCH_USER_CR3
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path.  Calling
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
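	/*
	 * SYSRET reloads RIP from RCX and RFLAGS from R11, so it can only
	 * be used when the saved user RCX/R11 already match the saved
	 * RIP/RFLAGS; otherwise returning this way would hand userspace
	 * back corrupted registers.
	 */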
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
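	/*
	 * For example, with __VIRTUAL_MASK_SHIFT == 47 the shift count is
	 * 16, so bits 63:48 become copies of bit 47.  A canonical RCX is
	 * left unchanged; a non-canonical one is altered and fails the
	 * comparison below.
	 */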

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	/*
	 * This opens a window where we have a user CR3, but are
	 * running in the kernel.  This makes using the CS
	 * register useless for telling whether or not we need to
	 * switch CR3 in NMIs.  Normal interrupts are OK because
	 * they are off here.
	 */
	SWITCH_USER_CR3
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	/*
	 * This opens a window where we have a user CR3, but are
	 * running in the kernel.  This makes using the CS
	 * register useless for telling whether or not we need to
	 * switch CR3 in NMIs.  Normal interrupts are OK because
	 * they are off here.
	 */
	SWITCH_USER_CR3
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path.  If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	popq	%rax
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

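	/*
	 * From this point on we run on the next task's kernel stack, so the
	 * pops below restore the callee-saved registers that were pushed
	 * when that task was last switched out (or the initial
	 * inactive_task_frame set up for a freshly forked task).
	 */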
#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

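	/*
	 * A user task falls through here and leaves the kernel via the
	 * normal syscall-exit path below.
	 */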
2:
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	SWITCH_USER_CR3
	SWAPGS
	jmp	restore_regs_and_iret

1:
	/* kernel thread */
	movq	%r12, %rdi
	call	*%rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.  Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS
	SWITCH_KERNEL_CR3

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rdi
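	/*
	 * irq_count is -1 while no interrupt is being handled, so the incl
	 * below sets ZF only for the outermost interrupt; cmovzq then
	 * switches to the per-CPU IRQ stack in that case, while nested
	 * interrupts stay on the stack they arrived on.
	 */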
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func				/* rdi points to pt_regs */
.endm

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
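/*
 * (~vector + 0x80) always fits in a signed byte, so each pushq uses the
 * short imm8 encoding and every stub fits in its 8-byte slot.  The
 * addq $-0x80 in common_interrupt turns the pushed value back into
 * ~vector, i.e. a number in the [-256, -1] range, from which do_IRQ
 * recovers the vector.
 */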
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp, %rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ
	SWITCH_USER_CR3
	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

	/*
	 * At this label, code paths which return to kernel and to user,
	 * which come from interrupts/exception and from syscalls, merge.
	 */
GLOBAL(restore_regs_and_iret)
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS
	SWITCH_KERNEL_CR3
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	popq	%rdi				/* Restore user RDI */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWITCH_USER_CR3
	SWAPGS
	movq	%rax, %rsp

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
# define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
# define POP_SECTION_IRQENTRY	.popsection
#else
# define PUSH_SECTION_IRQENTRY
# define POP_SECTION_IRQENTRY
#endif

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
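/*
 * CPU_TSS_IST(x) is the per-CPU address of the x-th IST stack pointer in
 * the TSS (tss.ist[x-1]).  idtentry's shift_ist option temporarily
 * subtracts EXCEPTION_STKSZ from it around the handler call so that a
 * recursive exception on the same IST gets a fresh stack.
 */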

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 867 | .endm |
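
The macro above hands control to the C handler with the pt_regs pointer in %rdi and the error code (or 0/-1) in %rsi, i.e. the first two integer argument registers of the SysV AMD64 ABI. A minimal userspace sketch of that convention follows; the pt_regs fields shown are an illustrative subset and do_example_trap is a hypothetical handler, not a kernel symbol.

#include <stdio.h>

/* Illustrative subset of the register frame the entry code builds. */
struct pt_regs {
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

/* Hypothetical handler with the argument order the macro sets up:
 * pt_regs pointer first (%rdi), error code second (%rsi). */
static void do_example_trap(struct pt_regs *regs, long error_code)
{
	printf("trap at ip=%#lx, error_code=%ld\n", regs->ip, error_code);
}

int main(void)
{
	struct pt_regs regs = { .ip = 0xffffffff81000000UL };

	do_example_trap(&regs, 0);	/* has_error_code=0: %esi cleared */
	do_example_trap(&regs, 13);	/* has_error_code=1: ORIG_RAX passed */
	return 0;
}
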
Alexander van Heukelum | b8b1d08 | 2008-11-21 16:44:28 +0100 | [diff] [blame] | 868 | |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 869 | #ifdef CONFIG_TRACING |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 870 | .macro trace_idtentry sym do_sym has_error_code:req |
| 871 | idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code |
| 872 | idtentry \sym \do_sym has_error_code=\has_error_code |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 873 | .endm |
| 874 | #else |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 875 | .macro trace_idtentry sym do_sym has_error_code:req |
| 876 | idtentry \sym \do_sym has_error_code=\has_error_code |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 877 | .endm |
| 878 | #endif |
| 879 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 880 | idtentry divide_error do_divide_error has_error_code=0 |
| 881 | idtentry overflow do_overflow has_error_code=0 |
| 882 | idtentry bounds do_bounds has_error_code=0 |
| 883 | idtentry invalid_op do_invalid_op has_error_code=0 |
| 884 | idtentry device_not_available do_device_not_available has_error_code=0 |
| 885 | idtentry double_fault do_double_fault has_error_code=1 paranoid=2 |
| 886 | idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 |
| 887 | idtentry invalid_TSS do_invalid_TSS has_error_code=1 |
| 888 | idtentry segment_not_present do_segment_not_present has_error_code=1 |
| 889 | idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0 |
| 890 | idtentry coprocessor_error do_coprocessor_error has_error_code=0 |
| 891 | idtentry alignment_check do_alignment_check has_error_code=1 |
| 892 | idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 |
Andy Lutomirski | 5cec93c | 2011-06-05 13:50:24 -0400 | [diff] [blame] | 893 | |
Ingo Molnar | 2601e64 | 2006-07-03 00:24:45 -0700 | [diff] [blame] | 894 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 895 | /* |
| 896 | * Reload gs selector with exception handling |
| 897 | * edi: new selector |
| 898 | */ |
Jeremy Fitzhardinge | 9f9d489 | 2008-06-25 00:19:32 -0400 | [diff] [blame] | 899 | ENTRY(native_load_gs_index) |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 900 | pushfq |
Jeremy Fitzhardinge | b8aa287 | 2009-01-28 14:35:03 -0800 | [diff] [blame] | 901 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 902 | SWAPGS |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 903 | .Lgs_change: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 904 | movl %edi, %gs |
Borislav Petkov | 96e5d28 | 2016-04-07 17:31:49 -0700 | [diff] [blame] | 905 | 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE |
Glauber de Oliveira Costa | 72fe485 | 2008-01-30 13:32:08 +0100 | [diff] [blame] | 906 | SWAPGS |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 907 | popfq |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 908 | ret |
Alexander van Heukelum | 6efdcfa | 2008-11-23 10:15:32 +0100 | [diff] [blame] | 909 | END(native_load_gs_index) |
Al Viro | 784d569 | 2016-01-11 11:04:34 -0500 | [diff] [blame] | 910 | EXPORT_SYMBOL(native_load_gs_index) |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 911 | |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 912 | _ASM_EXTABLE(.Lgs_change, bad_gs) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 913 | .section .fixup, "ax" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | /* running with kernelgs */ |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 915 | bad_gs: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 916 | SWAPGS /* switch back to user gs */ |
Andy Lutomirski | b038c84 | 2016-04-26 12:23:27 -0700 | [diff] [blame] | 917 | .macro ZAP_GS |
| 918 | /* This can't be a string because the preprocessor needs to see it. */ |
| 919 | movl $__USER_DS, %eax |
| 920 | movl %eax, %gs |
| 921 | .endm |
| 922 | ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 923 | xorl %eax, %eax |
| 924 | movl %eax, %gs |
| 925 | jmp 2b |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 926 | .previous |
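
As a rough userspace sketch of the idea behind _ASM_EXTABLE above: the kernel keeps a table of (faulting instruction, fixup) pairs, and the fault handler searches it so that a fault at .Lgs_change resumes at bad_gs instead of oopsing. The struct layout, addresses, and function name below are illustrative only; the real entries are emitted into the kernel's exception table by _ASM_EXTABLE.

#include <stdio.h>

struct extable_entry {
	unsigned long insn;	/* address of the instruction allowed to fault */
	unsigned long fixup;	/* where to resume if it does */
};

/* Illustrative table; the real one is generated by _ASM_EXTABLE(). */
static const struct extable_entry extable[] = {
	{ 0x1000, 0x2000 },	/* stand-ins for .Lgs_change -> bad_gs */
};

static unsigned long search_fixup(unsigned long fault_ip)
{
	unsigned int i;

	for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == fault_ip)
			return extable[i].fixup;
	return 0;	/* no fixup registered: treat as a real fault */
}

int main(void)
{
	printf("fixup for %#x -> %#lx\n", 0x1000, search_fixup(0x1000));
	printf("fixup for %#x -> %#lx\n", 0x3000, search_fixup(0x3000));
	return 0;
}
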
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 927 | |
Andi Kleen | 2699500 | 2006-08-02 22:37:28 +0200 | [diff] [blame] | 928 | /* Call softirq on interrupt stack. Interrupts are off. */ |
Frederic Weisbecker | 7d65f4a | 2013-09-05 15:49:45 +0200 | [diff] [blame] | 929 | ENTRY(do_softirq_own_stack) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 930 | pushq %rbp |
| 931 | mov %rsp, %rbp |
| 932 | incl PER_CPU_VAR(irq_count) |
| 933 | cmove PER_CPU_VAR(irq_stack_ptr), %rsp |
| 934 | push %rbp /* frame pointer backlink */ |
| 935 | call __do_softirq |
Andi Kleen | 2699500 | 2006-08-02 22:37:28 +0200 | [diff] [blame] | 936 | leaveq |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 937 | decl PER_CPU_VAR(irq_count) |
Andi Kleen | ed6b676 | 2005-07-28 21:15:49 -0700 | [diff] [blame] | 938 | ret |
Frederic Weisbecker | 7d65f4a | 2013-09-05 15:49:45 +0200 | [diff] [blame] | 939 | END(do_softirq_own_stack) |
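
The incl/cmove pair above is a compact outermost-entry check: the per-cpu irq_count counter is expected to start at -1 (set up elsewhere in the kernel), so the increment yields zero, and the conditional move switches %rsp to the irq stack, only when no irq-stack frame is already active. A hedged C model of that decision, with made-up addresses:

#include <stdio.h>

/* Per-"cpu" state for the sketch; the real counter lives in percpu data. */
static int irq_count = -1;	/* -1 means "not on the irq stack" */
static unsigned long irq_stack_ptr = 0xffff880000010000UL;	/* made up */

/* Returns the stack pointer to run on, mimicking incl + cmove. */
static unsigned long enter_irq_stack(unsigned long current_rsp)
{
	irq_count++;			/* incl PER_CPU_VAR(irq_count) */
	if (irq_count == 0)		/* ZF set -> cmove takes effect */
		return irq_stack_ptr;	/* outermost: switch stacks */
	return current_rsp;		/* nested: stay where we are */
}

static void leave_irq_stack(void)
{
	irq_count--;			/* decl PER_CPU_VAR(irq_count) */
}

int main(void)
{
	unsigned long rsp = 0xffffc90000001000UL;	/* made-up thread stack */

	unsigned long outer = enter_irq_stack(rsp);	/* switches */
	unsigned long inner = enter_irq_stack(outer);	/* nested: no switch */
	printf("outer=%#lx inner=%#lx\n", outer, inner);
	leave_irq_stack();
	leave_irq_stack();
	return 0;
}
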
Andi Kleen | 75154f4 | 2007-06-23 02:29:25 +0200 | [diff] [blame] | 940 | |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 941 | #ifdef CONFIG_XEN |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 942 | idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 943 | |
| 944 | /* |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 945 | * A note on the "critical region" in our callback handler. |
| 946 | * We want to avoid stacking callback handlers due to events occurring |
| 947 | * during handling of the last event. To do this, we keep events disabled |
| 948 | * until we've done all processing. HOWEVER, we must enable events before |
| 949 | * popping the stack frame (can't be done atomically) and so it would still |
| 950 | * be possible to get enough handler activations to overflow the stack. |
| 951 | * Although unlikely, bugs of that kind are hard to track down, so we'd |
| 952 | * like to avoid the possibility. |
| 953 | * So, on entry to the handler we detect whether we interrupted an |
| 954 | * existing activation in its critical region -- if so, we pop the current |
| 955 | * activation and restart the handler using the previous one. |
| 956 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 957 | ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct pt_regs *) */ |
| 958 | |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 959 | /* |
| 960 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will |
| 961 | * see the correct pointer to the pt_regs |
| 962 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 963 | movq %rdi, %rsp /* we don't return, adjust the stack frame */ |
| 964 | 11: incl PER_CPU_VAR(irq_count) |
| 965 | movq %rsp, %rbp |
| 966 | cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp |
| 967 | pushq %rbp /* frame pointer backlink */ |
| 968 | call xen_evtchn_do_upcall |
| 969 | popq %rsp |
| 970 | decl PER_CPU_VAR(irq_count) |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 971 | #ifndef CONFIG_PREEMPT |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 972 | call xen_maybe_preempt_hcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 973 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 974 | jmp error_exit |
Alexander van Heukelum | 371c394 | 2011-03-11 21:59:38 +0100 | [diff] [blame] | 975 | END(xen_do_hypervisor_callback) |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 976 | |
| 977 | /* |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 978 | * Hypervisor uses this for application faults while it executes. |
| 979 | * We get here for two reasons: |
| 980 | * 1. Fault while reloading DS, ES, FS or GS |
| 981 | * 2. Fault while executing IRET |
| 982 | * Category 1 we do not need to fix up as Xen has already reloaded all segment |
| 983 | * registers that could be reloaded and zeroed the others. |
| 984 | * Category 2 we fix up by killing the current process. We cannot use the |
| 985 | * normal Linux return path in this case because if we use the IRET hypercall |
| 986 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
| 987 | * We distinguish between categories by comparing each saved segment register |
| 988 | * with its current contents: any discrepancy means we are in category 1. |
| 989 | */ |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 990 | ENTRY(xen_failsafe_callback) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 991 | movl %ds, %ecx |
| 992 | cmpw %cx, 0x10(%rsp) |
| 993 | jne 1f |
| 994 | movl %es, %ecx |
| 995 | cmpw %cx, 0x18(%rsp) |
| 996 | jne 1f |
| 997 | movl %fs, %ecx |
| 998 | cmpw %cx, 0x20(%rsp) |
| 999 | jne 1f |
| 1000 | movl %gs, %ecx |
| 1001 | cmpw %cx, 0x28(%rsp) |
| 1002 | jne 1f |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1003 | /* All segments match their saved values => Category 2 (Bad IRET). */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1004 | movq (%rsp), %rcx |
| 1005 | movq 8(%rsp), %r11 |
| 1006 | addq $0x30, %rsp |
| 1007 | pushq $0 /* RIP */ |
| 1008 | pushq %r11 |
| 1009 | pushq %rcx |
| 1010 | jmp general_protection |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1011 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1012 | movq (%rsp), %rcx |
| 1013 | movq 8(%rsp), %r11 |
| 1014 | addq $0x30, %rsp |
| 1015 | pushq $-1 /* orig_ax = -1 => not a system call */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1016 | ALLOC_PT_GPREGS_ON_STACK |
| 1017 | SAVE_C_REGS |
| 1018 | SAVE_EXTRA_REGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1019 | jmp error_exit |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1020 | END(xen_failsafe_callback) |
| 1021 | |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1022 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 1023 | xen_hvm_callback_vector xen_evtchn_do_upcall |
| 1024 | |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1025 | #endif /* CONFIG_XEN */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1026 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1027 | #if IS_ENABLED(CONFIG_HYPERV) |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1028 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1029 | hyperv_callback_vector hyperv_vector_handler |
| 1030 | #endif /* CONFIG_HYPERV */ |
| 1031 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1032 | idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
| 1033 | idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
| 1034 | idtentry stack_segment do_stack_segment has_error_code=1 |
| 1035 | |
Jeremy Fitzhardinge | 6cac5a9 | 2009-03-29 19:56:29 -0700 | [diff] [blame] | 1036 | #ifdef CONFIG_XEN |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1037 | idtentry xen_debug do_debug has_error_code=0 |
| 1038 | idtentry xen_int3 do_int3 has_error_code=0 |
| 1039 | idtentry xen_stack_segment do_stack_segment has_error_code=1 |
Jeremy Fitzhardinge | 6cac5a9 | 2009-03-29 19:56:29 -0700 | [diff] [blame] | 1040 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1041 | |
| 1042 | idtentry general_protection do_general_protection has_error_code=1 |
| 1043 | trace_idtentry page_fault do_page_fault has_error_code=1 |
| 1044 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1045 | #ifdef CONFIG_KVM_GUEST |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1046 | idtentry async_page_fault do_async_page_fault has_error_code=1 |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1047 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1048 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1049 | #ifdef CONFIG_X86_MCE |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1050 | idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1051 | #endif |
| 1052 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1053 | /* |
| 1054 | * Save all registers in pt_regs, and switch gs if needed. |
| 1055 | * Use a slow but surefire "are we in kernel?" check. |
Hugh Dickins | 05ddad1 | 2017-09-26 18:43:07 -0700 | [diff] [blame] | 1056 | * |
| 1057 | * Return: ebx=0: needs swapgs but not SWITCH_USER_CR3 in paranoid_exit |
| 1058 | * ebx=1: needs neither swapgs nor SWITCH_USER_CR3 in paranoid_exit |
| 1059 | * ebx=2: needs both swapgs and SWITCH_USER_CR3 in paranoid_exit |
| 1060 | * ebx=3: needs SWITCH_USER_CR3 but not swapgs in paranoid_exit |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1061 | */ |
| 1062 | ENTRY(paranoid_entry) |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1063 | cld |
| 1064 | SAVE_C_REGS 8 |
| 1065 | SAVE_EXTRA_REGS 8 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1066 | movl $1, %ebx |
| 1067 | movl $MSR_GS_BASE, %ecx |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1068 | rdmsr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1069 | testl %edx, %edx |
| 1070 | js 1f /* negative -> in kernel */ |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1071 | SWAPGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1072 | xorl %ebx, %ebx |
Hugh Dickins | 05ddad1 | 2017-09-26 18:43:07 -0700 | [diff] [blame] | 1073 | 1: |
| 1074 | #ifdef CONFIG_KAISER |
| 1075 | /* |
| 1076 | * We might have come in between a swapgs and a SWITCH_KERNEL_CR3 |
| 1077 | * on entry, or between a SWITCH_USER_CR3 and a swapgs on exit. |
| 1078 | * Do a conditional SWITCH_KERNEL_CR3: this could safely be done |
| 1079 | * unconditionally, but we need to find out whether the reverse |
| 1080 | * should be done on return (conveyed to paranoid_exit in %ebx). |
| 1081 | */ |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1082 | ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER |
Hugh Dickins | 05ddad1 | 2017-09-26 18:43:07 -0700 | [diff] [blame] | 1083 | testl $KAISER_SHADOW_PGD_OFFSET, %eax |
| 1084 | jz 2f |
| 1085 | orl $2, %ebx |
| 1086 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax |
| 1087 | orq x86_cr3_pcid_noflush, %rax |
| 1088 | movq %rax, %cr3 |
| 1089 | 2: |
| 1090 | #endif |
| 1091 | ret |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1092 | END(paranoid_entry) |
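
Under CONFIG_KAISER the %ebx value returned above is effectively a two-bit flag word: bit 0 set means the kernel gsbase was already live (no SWAPGS needed on exit), bit 1 set means this entry switched to the kernel CR3 (so paranoid_exit must SWITCH_USER_CR3). A small sketch of that encoding and of how paranoid_exit decodes it; the macro names and helper functions are hypothetical, only the bit meanings come from the code above.

#include <stdio.h>
#include <stdbool.h>

#define PARANOID_NO_SWAPGS	0x1	/* bit 0: gsbase was already the kernel's */
#define PARANOID_SWITCH_CR3	0x2	/* bit 1: entry switched to kernel CR3 */

/* Mimics paranoid_entry's %ebx computation. */
static int paranoid_entry_flags(bool gs_was_kernel, bool cr3_was_user)
{
	int ebx = 0;

	if (gs_was_kernel)
		ebx |= PARANOID_NO_SWAPGS;	/* movl $1, %ebx kept */
	if (cr3_was_user)
		ebx |= PARANOID_SWITCH_CR3;	/* orl $2, %ebx */
	return ebx;
}

/* Mimics the two tests in paranoid_exit. */
static void paranoid_exit_decode(int ebx)
{
	if (ebx & PARANOID_SWITCH_CR3)
		printf("  SWITCH_USER_CR3\n");
	if (!(ebx & PARANOID_NO_SWAPGS))
		printf("  SWAPGS\n");
	printf("  iret\n");
}

int main(void)
{
	bool combos[4][2] = {
		{ false, false }, { true, false }, { false, true }, { true, true }
	};

	for (int i = 0; i < 4; i++) {
		int ebx = paranoid_entry_flags(combos[i][0], combos[i][1]);

		printf("gs_was_kernel=%d cr3_was_user=%d -> ebx=%d:\n",
		       combos[i][0], combos[i][1], ebx);
		paranoid_exit_decode(ebx);
	}
	return 0;
}
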
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1093 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1094 | /* |
| 1095 | * "Paranoid" exit path from exception stack. This is invoked |
| 1096 | * only on return from non-NMI IST interrupts that came |
| 1097 | * from kernel space. |
| 1098 | * |
| 1099 | * We may be returning to very strange contexts (e.g. very early |
| 1100 | * in syscall entry), so checking for preemption here would |
| 1101 | * be complicated. Fortunately, there's no good reason |
| 1102 | * to try to handle preemption here. |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1103 | * |
Hugh Dickins | 05ddad1 | 2017-09-26 18:43:07 -0700 | [diff] [blame] | 1104 | * On entry: ebx=0: needs swapgs but not SWITCH_USER_CR3 |
| 1105 | * ebx=1: needs neither swapgs nor SWITCH_USER_CR3 |
| 1106 | * ebx=2: needs both swapgs and SWITCH_USER_CR3 |
| 1107 | * ebx=3: needs SWITCH_USER_CR3 but not swapgs |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1108 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1109 | ENTRY(paranoid_exit) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1110 | DISABLE_INTERRUPTS(CLBR_NONE) |
Steven Rostedt | 5963e31 | 2012-05-30 11:54:53 -0400 | [diff] [blame] | 1111 | TRACE_IRQS_OFF_DEBUG |
Denys Vlasenko | f2db938 | 2015-02-26 14:40:30 -0800 | [diff] [blame] | 1112 | TRACE_IRQS_IRETQ_DEBUG |
Hugh Dickins | 05ddad1 | 2017-09-26 18:43:07 -0700 | [diff] [blame] | 1113 | #ifdef CONFIG_KAISER |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1114 | /* No ALTERNATIVE for X86_FEATURE_KAISER: paranoid_entry sets %ebx */ |
Hugh Dickins | 05ddad1 | 2017-09-26 18:43:07 -0700 | [diff] [blame] | 1115 | testl $2, %ebx /* SWITCH_USER_CR3 needed? */ |
| 1116 | jz paranoid_exit_no_switch |
| 1117 | SWITCH_USER_CR3 |
| 1118 | paranoid_exit_no_switch: |
| 1119 | #endif |
| 1120 | testl $1, %ebx /* swapgs needed? */ |
| 1121 | jnz paranoid_exit_no_swapgs |
| 1122 | SWAPGS_UNSAFE_STACK |
| 1123 | paranoid_exit_no_swapgs: |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1124 | RESTORE_EXTRA_REGS |
| 1125 | RESTORE_C_REGS |
| 1126 | REMOVE_PT_GPREGS_FROM_STACK 8 |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 1127 | INTERRUPT_RETURN |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1128 | END(paranoid_exit) |
| 1129 | |
| 1130 | /* |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1131 | * Save all registers in pt_regs, and switch gs if needed. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1132 | * Return: EBX=0: came from user mode; EBX=1: otherwise |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1133 | */ |
| 1134 | ENTRY(error_entry) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1135 | cld |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1136 | SAVE_C_REGS 8 |
| 1137 | SAVE_EXTRA_REGS 8 |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1138 | /* |
| 1139 | * error_entry() always returns with a kernel gsbase and |
| 1140 | * CR3. We must also have a kernel CR3/gsbase before |
| 1141 | * calling TRACE_IRQS_*. Just unconditionally switch to |
| 1142 | * the kernel CR3 here. |
| 1143 | */ |
| 1144 | SWITCH_KERNEL_CR3 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1145 | xorl %ebx, %ebx |
Denys Vlasenko | 03335e9 | 2015-04-27 15:21:52 +0200 | [diff] [blame] | 1146 | testb $3, CS+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1147 | jz .Lerror_kernelspace |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1148 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1149 | /* |
| 1150 | * We entered from user mode or we're pretending to have entered |
| 1151 | * from user mode due to an IRET fault. |
| 1152 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1153 | SWAPGS |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1154 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1155 | .Lerror_entry_from_usermode_after_swapgs: |
Andy Lutomirski | f107505 | 2015-11-12 12:59:00 -0800 | [diff] [blame] | 1156 | /* |
| 1157 | * We need to tell lockdep that IRQs are off. We can't do this until |
| 1158 | * we fix gsbase, and we should do it before enter_from_user_mode |
| 1159 | * (which can take locks). |
| 1160 | */ |
| 1161 | TRACE_IRQS_OFF |
Andy Lutomirski | 478dc89 | 2015-11-12 12:59:04 -0800 | [diff] [blame] | 1162 | CALL_enter_from_user_mode |
Andy Lutomirski | f107505 | 2015-11-12 12:59:00 -0800 | [diff] [blame] | 1163 | ret |
Andy Lutomirski | 02bc776 | 2015-07-03 12:44:31 -0700 | [diff] [blame] | 1164 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1165 | .Lerror_entry_done: |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1166 | TRACE_IRQS_OFF |
| 1167 | ret |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1168 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1169 | /* |
| 1170 | * There are two places in the kernel that can potentially fault with |
| 1171 | * usergs. Handle them here. B stepping K8s sometimes report a |
| 1172 | * truncated RIP for IRET exceptions returning to compat mode. Check |
| 1173 | * for these here too. |
| 1174 | */ |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1175 | .Lerror_kernelspace: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1176 | incl %ebx |
| 1177 | leaq native_irq_return_iret(%rip), %rcx |
| 1178 | cmpq %rcx, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1179 | je .Lerror_bad_iret |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1180 | movl %ecx, %eax /* zero extend */ |
| 1181 | cmpq %rax, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1182 | je .Lbstep_iret |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1183 | cmpq $.Lgs_change, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1184 | jne .Lerror_entry_done |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1185 | |
| 1186 | /* |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1187 | * hack: .Lgs_change can fail with user gsbase. If this happens, fix up |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1188 | * gsbase and proceed. We'll fix up the exception and land in |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1189 | * .Lgs_change's error handler with kernel gsbase. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1190 | */ |
Wanpeng Li | 2fa5f04 | 2016-09-30 09:01:06 +0800 | [diff] [blame] | 1191 | SWAPGS |
| 1192 | jmp .Lerror_entry_done |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1193 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1194 | .Lbstep_iret: |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1195 | /* Fix truncated RIP */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1196 | movq %rcx, RIP+8(%rsp) |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1197 | /* fall through */ |
| 1198 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1199 | .Lerror_bad_iret: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1200 | /* |
| 1201 | * We came from an IRET to user mode, so we have user gsbase. |
| 1202 | * Switch to kernel gsbase: |
| 1203 | */ |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1204 | SWAPGS |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1205 | |
| 1206 | /* |
| 1207 | * Pretend that the exception came from user mode: set up pt_regs |
| 1208 | * as if we faulted immediately after IRET and clear EBX so that |
| 1209 | * error_exit knows that we will be returning to user mode. |
| 1210 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1211 | mov %rsp, %rdi |
| 1212 | call fixup_bad_iret |
| 1213 | mov %rax, %rsp |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1214 | decl %ebx |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1215 | jmp .Lerror_entry_from_usermode_after_swapgs |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1216 | END(error_entry) |
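
The .Lerror_kernelspace check works because "movl %ecx, %eax" zero-extends: it keeps only the low 32 bits of native_irq_return_iret, which is what a B-stepping K8 reports as the faulting RIP. A small sketch of that comparison; the addresses are made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up kernel address standing in for native_irq_return_iret. */
	uint64_t irq_return_iret = 0xffffffff81a01234ULL;

	/* movl %ecx, %eax zero-extends: only the low 32 bits survive. */
	uint64_t truncated = (uint32_t)irq_return_iret;

	/* RIP as a buggy CPU might report it for the IRET fault. */
	uint64_t reported_rip = 0x0000000081a01234ULL;

	if (reported_rip == irq_return_iret)
		printf("exact match: .Lerror_bad_iret path\n");
	else if (reported_rip == truncated)
		printf("truncated match: .Lbstep_iret path (fix up RIP)\n");
	else
		printf("not an IRET fault\n");
	return 0;
}
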
| 1217 | |
| 1218 | |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1219 | /* |
Nicolas Iooss | 75ca5b2 | 2016-07-29 13:39:51 +0200 | [diff] [blame] | 1220 | * On entry, EBX is a "return to kernel mode" flag: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1221 | * 1: already in kernel mode, don't need SWAPGS |
| 1222 | * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode |
| 1223 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1224 | ENTRY(error_exit) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1225 | movl %ebx, %eax |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1226 | DISABLE_INTERRUPTS(CLBR_NONE) |
| 1227 | TRACE_IRQS_OFF |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1228 | testl %eax, %eax |
| 1229 | jnz retint_kernel |
| 1230 | jmp retint_user |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1231 | END(error_exit) |
| 1232 | |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1233 | /* Runs on exception stack */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1234 | ENTRY(nmi) |
Andy Lutomirski | fc57a7c | 2015-09-20 16:32:04 -0700 | [diff] [blame] | 1235 | /* |
| 1236 | * Fix up the exception frame if we're on Xen. |
| 1237 | * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most |
| 1238 | * one value to the stack on native, so it may clobber the rdx |
| 1239 | * scratch slot, but it won't clobber any of the important |
| 1240 | * slots past it. |
| 1241 | * |
| 1242 | * Xen is a different story, because the Xen frame itself overlaps |
| 1243 | * the "NMI executing" variable. |
| 1244 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1245 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
Andy Lutomirski | fc57a7c | 2015-09-20 16:32:04 -0700 | [diff] [blame] | 1246 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1247 | /* |
| 1248 | * We allow breakpoints in NMIs. If a breakpoint occurs, then |
| 1249 | * the iretq it performs will take us out of NMI context. |
| 1250 | * This means that we can have nested NMIs where the next |
| 1251 | * NMI is using the top of the stack of the previous NMI. We |
| 1252 | * can't let it execute because the nested NMI will corrupt the |
| 1253 | * stack of the previous NMI. NMI handlers are not re-entrant |
| 1254 | * anyway. |
| 1255 | * |
| 1256 | * To handle this case we do the following: |
| 1257 | * Check a special location on the stack that contains |
| 1258 | * a variable that is set when NMIs are executing. |
| 1259 | * The interrupted task's stack is also checked to see if it |
| 1260 | * is an NMI stack. |
| 1261 | * If the variable is not set and the stack is not the NMI |
| 1262 | * stack then: |
| 1263 | * o Set the special variable on the stack |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1264 | * o Copy the interrupt frame into an "outermost" location on the |
| 1265 | * stack |
| 1266 | * o Copy the interrupt frame into an "iret" location on the stack |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1267 | * o Continue processing the NMI |
| 1268 | * If the variable is set or the previous stack is the NMI stack: |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1269 | * o Modify the "iret" location to jump to the repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1270 | * o return back to the first NMI |
| 1271 | * |
| 1272 | * Now on exit of the first NMI, we first clear the stack variable |
| 1273 | * The NMI stack will tell any nested NMIs at that point that it is |
| 1274 | * nested. Then we pop the stack normally with iret, and if there was |
| 1275 | * a nested NMI that updated the copy interrupt stack frame, a |
| 1276 | * jump will be made to the repeat_nmi code that will handle the second |
| 1277 | * NMI. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1278 | * |
| 1279 | * However, espfix prevents us from directly returning to userspace |
| 1280 | * with a single IRET instruction. Similarly, IRET to user mode |
| 1281 | * can fault. We therefore handle NMIs from user space like |
| 1282 | * other IST entries. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1283 | */ |
| 1284 | |
Andy Lutomirski | 10d5bf2 | 2017-08-07 19:43:13 -0700 | [diff] [blame] | 1285 | ASM_CLAC |
| 1286 | |
Denys Vlasenko | 146b2b0 | 2015-03-25 18:18:13 +0100 | [diff] [blame] | 1287 | /* Use %rdx as our temp variable throughout */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1288 | pushq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1289 | |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1290 | testb $3, CS-RIP+8(%rsp) |
| 1291 | jz .Lnmi_from_kernel |
Steven Rostedt | 45d5a16 | 2012-02-19 16:43:37 -0500 | [diff] [blame] | 1292 | |
| 1293 | /* |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1294 | * NMI from user mode. We need to run on the thread stack, but we |
| 1295 | * can't go through the normal entry paths: NMIs are masked, and |
| 1296 | * we don't want to enable interrupts, because then we'll end |
| 1297 | * up in an awkward situation in which IRQs are on but NMIs |
| 1298 | * are off. |
Andy Lutomirski | 83c133c | 2015-09-20 16:32:05 -0700 | [diff] [blame] | 1299 | * |
| 1300 | * We also must not push anything to the stack before switching |
| 1301 | * stacks lest we corrupt the "NMI executing" variable. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1302 | */ |
| 1303 | |
Andy Lutomirski | 83c133c | 2015-09-20 16:32:05 -0700 | [diff] [blame] | 1304 | SWAPGS_UNSAFE_STACK |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1305 | /* |
| 1306 | * percpu variables are mapped with user CR3, so no need |
| 1307 | * to switch CR3 here. |
| 1308 | */ |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1309 | cld |
| 1310 | movq %rsp, %rdx |
| 1311 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
| 1312 | pushq 5*8(%rdx) /* pt_regs->ss */ |
| 1313 | pushq 4*8(%rdx) /* pt_regs->rsp */ |
| 1314 | pushq 3*8(%rdx) /* pt_regs->flags */ |
| 1315 | pushq 2*8(%rdx) /* pt_regs->cs */ |
| 1316 | pushq 1*8(%rdx) /* pt_regs->rip */ |
| 1317 | pushq $-1 /* pt_regs->orig_ax */ |
| 1318 | pushq %rdi /* pt_regs->di */ |
| 1319 | pushq %rsi /* pt_regs->si */ |
| 1320 | pushq (%rdx) /* pt_regs->dx */ |
| 1321 | pushq %rcx /* pt_regs->cx */ |
| 1322 | pushq %rax /* pt_regs->ax */ |
| 1323 | pushq %r8 /* pt_regs->r8 */ |
| 1324 | pushq %r9 /* pt_regs->r9 */ |
| 1325 | pushq %r10 /* pt_regs->r10 */ |
| 1326 | pushq %r11 /* pt_regs->r11 */ |
| 1327 | pushq %rbx /* pt_regs->rbx */ |
| 1328 | pushq %rbp /* pt_regs->rbp */ |
| 1329 | pushq %r12 /* pt_regs->r12 */ |
| 1330 | pushq %r13 /* pt_regs->r13 */ |
| 1331 | pushq %r14 /* pt_regs->r14 */ |
| 1332 | pushq %r15 /* pt_regs->r15 */ |
| 1333 | |
| 1334 | /* |
| 1335 | * At this point we no longer need to worry about stack damage |
| 1336 | * due to nesting -- we're on the normal thread stack and we're |
| 1337 | * done with the NMI stack. |
| 1338 | */ |
| 1339 | |
| 1340 | movq %rsp, %rdi |
| 1341 | movq $-1, %rsi |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1342 | #ifdef CONFIG_KAISER |
| 1343 | /* Unconditionally use kernel CR3 for do_nmi() */ |
| 1344 | /* %rax is saved above, so OK to clobber here */ |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1345 | ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER |
Hugh Dickins | d0142ce | 2017-08-27 16:24:27 -0700 | [diff] [blame] | 1346 | /* If PCID enabled, NOFLUSH now and NOFLUSH on return */ |
| 1347 | orq x86_cr3_pcid_noflush, %rax |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1348 | pushq %rax |
Hugh Dickins | 2684b12 | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1349 | /* mask off "user" bit of pgd address and 12 PCID bits: */ |
| 1350 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1351 | movq %rax, %cr3 |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1352 | 2: |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1353 | #endif |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1354 | call do_nmi |
Hugh Dickins | 1937794 | 2017-09-21 20:39:56 -0700 | [diff] [blame] | 1355 | |
| 1356 | #ifdef CONFIG_KAISER |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1357 | /* |
| 1358 | * Unconditionally restore CR3. I know we return to |
| 1359 | * kernel code that needs user CR3, but do we ever return |
| 1360 | * to "user mode" where we need the kernel CR3? |
| 1361 | */ |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1362 | ALTERNATIVE "", "popq %rax; movq %rax, %cr3", X86_FEATURE_KAISER |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1363 | #endif |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1364 | |
| 1365 | /* |
| 1366 | * Return back to user mode. We must *not* do the normal exit |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1367 | * work, because we don't want to enable interrupts. Do not |
| 1368 | * switch to user CR3: we might be going back to kernel code |
| 1369 | * that had a user CR3 set. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1370 | */ |
| 1371 | SWAPGS |
| 1372 | jmp restore_c_regs_and_iret |
| 1373 | |
| 1374 | .Lnmi_from_kernel: |
| 1375 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1376 | * Here's what our stack frame will look like: |
| 1377 | * +---------------------------------------------------------+ |
| 1378 | * | original SS | |
| 1379 | * | original Return RSP | |
| 1380 | * | original RFLAGS | |
| 1381 | * | original CS | |
| 1382 | * | original RIP | |
| 1383 | * +---------------------------------------------------------+ |
| 1384 | * | temp storage for rdx | |
| 1385 | * +---------------------------------------------------------+ |
| 1386 | * | "NMI executing" variable | |
| 1387 | * +---------------------------------------------------------+ |
| 1388 | * | iret SS } Copied from "outermost" frame | |
| 1389 | * | iret Return RSP } on each loop iteration; overwritten | |
| 1390 | * | iret RFLAGS } by a nested NMI to force another | |
| 1391 | * | iret CS } iteration if needed. | |
| 1392 | * | iret RIP } | |
| 1393 | * +---------------------------------------------------------+ |
| 1394 | * | outermost SS } initialized in first_nmi; | |
| 1395 | * | outermost Return RSP } will not be changed before | |
| 1396 | * | outermost RFLAGS } NMI processing is done. | |
| 1397 | * | outermost CS } Copied to "iret" frame on each | |
| 1398 | * | outermost RIP } iteration. | |
| 1399 | * +---------------------------------------------------------+ |
| 1400 | * | pt_regs | |
| 1401 | * +---------------------------------------------------------+ |
| 1402 | * |
| 1403 | * The "original" frame is used by hardware. Before re-enabling |
| 1404 | * NMIs, we need to be done with it, and we need to leave enough |
| 1405 | * space for the asm code here. |
| 1406 | * |
| 1407 | * We return by executing IRET while RSP points to the "iret" frame. |
| 1408 | * That will either return for real or it will loop back into NMI |
| 1409 | * processing. |
| 1410 | * |
| 1411 | * The "outermost" frame is copied to the "iret" frame on each |
| 1412 | * iteration of the loop, so each iteration starts with the "iret" |
| 1413 | * frame pointing to the final return target. |
| 1414 | */ |
| 1415 | |
| 1416 | /* |
| 1417 | * Determine whether we're a nested NMI. |
| 1418 | * |
Andy Lutomirski | a27507c | 2015-07-15 10:29:37 -0700 | [diff] [blame] | 1419 | * If we interrupted kernel code between repeat_nmi and |
| 1420 | * end_repeat_nmi, then we are a nested NMI. We must not |
| 1421 | * modify the "iret" frame because it's being written by |
| 1422 | * the outer NMI. That's okay; the outer NMI handler is |
| 1423 | * about to call do_nmi anyway, so we can just |
| 1424 | * resume the outer NMI. |
| 1425 | */ |
| 1426 | |
| 1427 | movq $repeat_nmi, %rdx |
| 1428 | cmpq 8(%rsp), %rdx |
| 1429 | ja 1f |
| 1430 | movq $end_repeat_nmi, %rdx |
| 1431 | cmpq 8(%rsp), %rdx |
| 1432 | ja nested_nmi_out |
| 1433 | 1: |
| 1434 | |
| 1435 | /* |
| 1436 | * Now check "NMI executing". If it's set, then we're nested. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1437 | * This will not detect if we interrupted an outer NMI just |
| 1438 | * before IRET. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1439 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1440 | cmpl $1, -8(%rsp) |
| 1441 | je nested_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1442 | |
| 1443 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1444 | * Now test if the previous stack was an NMI stack. This covers |
| 1445 | * the case where we interrupt an outer NMI after it clears |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1446 | * "NMI executing" but before IRET. We need to be careful, though: |
| 1447 | * there is one case in which RSP could point to the NMI stack |
| 1448 | * despite there being no NMI active: naughty userspace controls |
| 1449 | * RSP at the very beginning of the SYSCALL targets. We can |
| 1450 | * pull a fast one on naughty userspace, though: we program |
| 1451 | * SYSCALL to mask DF, so userspace cannot cause DF to be set |
| 1452 | * if it controls the kernel's RSP. We set DF before we clear |
| 1453 | * "NMI executing". |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1454 | */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1455 | lea 6*8(%rsp), %rdx |
| 1456 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ |
| 1457 | cmpq %rdx, 4*8(%rsp) |
| 1458 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ |
| 1459 | ja first_nmi |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1460 | |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1461 | subq $EXCEPTION_STKSZ, %rdx |
| 1462 | cmpq %rdx, 4*8(%rsp) |
| 1463 | /* If it is below the NMI stack, it is a normal NMI */ |
| 1464 | jb first_nmi |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1465 | |
| 1466 | /* Ah, it is within the NMI stack. */ |
| 1467 | |
| 1468 | testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) |
| 1469 | jz first_nmi /* RSP was user controlled. */ |
| 1470 | |
| 1471 | /* This is a nested NMI. */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1472 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1473 | nested_nmi: |
| 1474 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1475 | * Modify the "iret" frame to point to repeat_nmi, forcing another |
| 1476 | * iteration of NMI handling. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1477 | */ |
Andy Lutomirski | 23a781e | 2015-07-15 10:29:39 -0700 | [diff] [blame] | 1478 | subq $8, %rsp |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1479 | leaq -10*8(%rsp), %rdx |
| 1480 | pushq $__KERNEL_DS |
| 1481 | pushq %rdx |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1482 | pushfq |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1483 | pushq $__KERNEL_CS |
| 1484 | pushq $repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1485 | |
| 1486 | /* Put stack back */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1487 | addq $(6*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1488 | |
| 1489 | nested_nmi_out: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1490 | popq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1491 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1492 | /* We are returning to kernel mode, so this cannot result in a fault. */ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1493 | INTERRUPT_RETURN |
| 1494 | |
| 1495 | first_nmi: |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1496 | /* Restore rdx. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1497 | movq (%rsp), %rdx |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1498 | |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1499 | /* Make room for "NMI executing". */ |
| 1500 | pushq $0 |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1501 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1502 | /* Leave room for the "iret" frame */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1503 | subq $(5*8), %rsp |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1504 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1505 | /* Copy the "original" frame to the "outermost" frame */ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1506 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1507 | pushq 11*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1508 | .endr |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1509 | |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1510 | /* Everything up to here is safe from nested NMIs */ |
| 1511 | |
Andy Lutomirski | a97439a | 2015-07-15 10:29:41 -0700 | [diff] [blame] | 1512 | #ifdef CONFIG_DEBUG_ENTRY |
| 1513 | /* |
| 1514 | * For ease of testing, unmask NMIs right away. Disabled by |
| 1515 | * default because IRET is very expensive. |
| 1516 | */ |
| 1517 | pushq $0 /* SS */ |
| 1518 | pushq %rsp /* RSP (minus 8 because of the previous push) */ |
| 1519 | addq $8, (%rsp) /* Fix up RSP */ |
| 1520 | pushfq /* RFLAGS */ |
| 1521 | pushq $__KERNEL_CS /* CS */ |
| 1522 | pushq $1f /* RIP */ |
| 1523 | INTERRUPT_RETURN /* continues at repeat_nmi below */ |
| 1524 | 1: |
| 1525 | #endif |
| 1526 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1527 | repeat_nmi: |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1528 | /* |
| 1529 | * If there was a nested NMI, the first NMI's iret will return |
| 1530 | * here. But NMIs are still enabled and we can take another |
| 1531 | * nested NMI. The nested NMI checks the interrupted RIP to see |
| 1532 | * if it is between repeat_nmi and end_repeat_nmi, and if so |
| 1533 | * it will just return, as we are about to repeat an NMI anyway. |
| 1534 | * This makes it safe to copy to the stack frame that a nested |
| 1535 | * NMI will update. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1536 | * |
| 1537 | * RSP is pointing to "outermost RIP". gsbase is unknown, but, if |
| 1538 | * we're repeating an NMI, gsbase has the same value that it had on |
| 1539 | * the first iteration. paranoid_entry will load the kernel |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1540 | * gsbase if needed before we call do_nmi. "NMI executing" |
| 1541 | * is zero. |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1542 | */ |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1543 | movq $1, 10*8(%rsp) /* Set "NMI executing". */ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1544 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1545 | /* |
| 1546 | * Copy the "outermost" frame to the "iret" frame. NMIs that nest |
| 1547 | * here must not modify the "iret" frame while we're writing to |
| 1548 | * it or it will end up containing garbage. |
| 1549 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1550 | addq $(10*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1551 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1552 | pushq -6*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1553 | .endr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1554 | subq $(5*8), %rsp |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1555 | end_repeat_nmi: |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1556 | |
| 1557 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1558 | * Everything below this point can be preempted by a nested NMI. |
| 1559 | * If this happens, then the inner NMI will change the "iret" |
| 1560 | * frame to point back to repeat_nmi. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1561 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1562 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1563 | ALLOC_PT_GPREGS_ON_STACK |
| 1564 | |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1565 | /* |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1566 | * Use the same approach as paranoid_entry to handle SWAPGS, but |
| 1567 | * without CR3 handling since we do that differently in NMIs. No |
| 1568 | * need to use paranoid_exit as we should not be calling schedule |
| 1569 | * in NMI context, even with normal interrupts enabled. An NMI |
| 1570 | * should not be setting NEED_RESCHED or anything that normal |
| 1571 | * interrupts and exceptions might do. |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1572 | */ |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1573 | cld |
| 1574 | SAVE_C_REGS |
| 1575 | SAVE_EXTRA_REGS |
| 1576 | movl $1, %ebx |
| 1577 | movl $MSR_GS_BASE, %ecx |
| 1578 | rdmsr |
| 1579 | testl %edx, %edx |
| 1580 | js 1f /* negative -> in kernel */ |
| 1581 | SWAPGS |
| 1582 | xorl %ebx, %ebx |
| 1583 | 1: |
Hugh Dickins | 1937794 | 2017-09-21 20:39:56 -0700 | [diff] [blame] | 1584 | movq %rsp, %rdi |
| 1585 | movq $-1, %rsi |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1586 | #ifdef CONFIG_KAISER |
| 1587 | /* Unconditionally use kernel CR3 for do_nmi() */ |
| 1588 | /* %rax is saved above, so OK to clobber here */ |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1589 | ALTERNATIVE "jmp 2f", "movq %cr3, %rax", X86_FEATURE_KAISER |
Hugh Dickins | d0142ce | 2017-08-27 16:24:27 -0700 | [diff] [blame] | 1590 | /* If PCID enabled, NOFLUSH now and NOFLUSH on return */ |
| 1591 | orq x86_cr3_pcid_noflush, %rax |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1592 | pushq %rax |
Hugh Dickins | 2684b12 | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1593 | /* mask off "user" bit of pgd address and 12 PCID bits: */ |
| 1594 | andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1595 | movq %rax, %cr3 |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1596 | 2: |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1597 | #endif |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1598 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1599 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1600 | call do_nmi |
Hugh Dickins | 1937794 | 2017-09-21 20:39:56 -0700 | [diff] [blame] | 1601 | |
| 1602 | #ifdef CONFIG_KAISER |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1603 | /* |
| 1604 | * Unconditionally restore CR3. We might be returning to |
| 1605 | * kernel code that needs user CR3, like just before |
| 1606 | * a sysret. |
| 1607 | */ |
Hugh Dickins | 23e0943 | 2017-09-24 16:59:49 -0700 | [diff] [blame^] | 1608 | ALTERNATIVE "", "popq %rax; movq %rax, %cr3", X86_FEATURE_KAISER |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1609 | #endif |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1610 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1611 | testl %ebx, %ebx /* swapgs needed? */ |
| 1612 | jnz nmi_restore |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1613 | nmi_swapgs: |
Dave Hansen | 8f0baad | 2017-08-30 16:23:00 -0700 | [diff] [blame] | 1614 | /* We fixed up CR3 above, so no need to switch it here */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1615 | SWAPGS_UNSAFE_STACK |
| 1616 | nmi_restore: |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1617 | RESTORE_EXTRA_REGS |
| 1618 | RESTORE_C_REGS |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1619 | |
| 1620 | /* Point RSP at the "iret" frame. */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1621 | REMOVE_PT_GPREGS_FROM_STACK 6*8 |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1622 | |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1623 | /* |
| 1624 | * Clear "NMI executing". Set DF first so that we can easily |
| 1625 | * distinguish the remaining code between here and IRET from |
| 1626 | * the SYSCALL entry and exit paths. On a native kernel, we |
| 1627 | * could just inspect RIP, but, on paravirt kernels, |
| 1628 | * INTERRUPT_RETURN can translate into a jump into a |
| 1629 | * hypercall page. |
| 1630 | */ |
| 1631 | std |
| 1632 | movq $0, 5*8(%rsp) /* clear "NMI executing" */ |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1633 | |
| 1634 | /* |
| 1635 | * INTERRUPT_RETURN reads the "iret" frame and exits the NMI |
| 1636 | * stack in a single instruction. We are returning to kernel |
| 1637 | * mode, so this cannot result in a fault. |
| 1638 | */ |
Andy Lutomirski | 5ca6f70 | 2015-06-04 13:24:29 -0700 | [diff] [blame] | 1639 | INTERRUPT_RETURN |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1640 | END(nmi) |
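
As a very rough model of the nesting protocol described in the comments above (not kernel code): the outermost NMI sets "NMI executing" and copies the "outermost" frame into the "iret" frame; a nested NMI that finds the flag set merely redirects the iret frame at repeat_nmi and returns, so the outer handler loops and services it. The do/while loop below stands in for the IRET landing in repeat_nmi, and all names are hypothetical.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the stack slots described in the big comment above. */
static bool nmi_executing;		/* the "NMI executing" variable */
static const char *iret_target;		/* where the "iret" frame points */
static bool pending_repeat;		/* a nested NMI asked for another pass */
static int passes;

static void nmi_handler(void)
{
	if (nmi_executing) {
		/* Nested: don't run the handler, just force another pass. */
		iret_target = "repeat_nmi";
		pending_repeat = true;
		printf("nested NMI: redirect iret frame to repeat_nmi\n");
		return;
	}

	nmi_executing = true;
	do {
		iret_target = "interrupted code";	/* copy outermost -> iret */
		pending_repeat = false;
		passes++;
		printf("pass %d: call do_nmi()\n", passes);
		if (passes == 1)
			nmi_handler();	/* a nested NMI arrives during the first pass */
	} while (pending_repeat);	/* iret frame said repeat_nmi */

	nmi_executing = false;
	printf("final IRET to %s after %d passes\n", iret_target, passes);
}

int main(void)
{
	nmi_handler();
	return 0;
}
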
| 1641 | |
| 1642 | ENTRY(ignore_sysret) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1643 | mov $-ENOSYS, %eax |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1644 | sysret |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1645 | END(ignore_sysret) |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1646 | |
| 1647 | ENTRY(rewind_stack_do_exit) |
| 1648 | /* Prevent any naive code from trying to unwind to our caller. */ |
| 1649 | xorl %ebp, %ebp |
| 1650 | |
| 1651 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rax |
| 1652 | leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp |
| 1653 | |
| 1654 | call do_exit |
| 1655 | 1: jmp 1b |
| 1656 | END(rewind_stack_do_exit) |