/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:   Architecture defined interrupt frame from SS to RIP
 *                 at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:    Define functions in the symbol table.
 * - TRACE_IRQ_*:  Trace hardirq state for lock debugging.
 * - idtentry:     Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
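
/*
 * Editorial note: AUDIT_ARCH_X86_64 may be defined before its two
 * helper macros because the preprocessor expands macros at use, not
 * at definition time.
 */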

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled, it adds a breakpoint
 * to all locations that it is about to modify, syncs CPUs, updates
 * all the code, syncs CPUs, then removes the breakpoints. During this
 * window, if lockdep is enabled, it might jump back into the debug
 * handler outside the IST-protected region (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET, because IRET
 * deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
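
/*
 * Illustrative sketch (not from the original source): a minimal
 * user-space invocation matching the convention above, assuming the
 * usual x86-64 Linux syscall numbering where __NR_write == 1:
 *
 *	movq	$1, %rax		- system call number (write)
 *	movq	$1, %rdi		- arg0: fd
 *	leaq	buf(%rip), %rsi		- arg1: buffer
 *	movq	$14, %rdx		- arg2: count
 *	syscall				- clobbers rcx (rip) and r11 (rflags)
 */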

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	/*
	 * Re-enable interrupts.
	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
	 * must execute atomically in the face of possible interrupt-driven
	 * task preemption. We must enable interrupts only after we're done
	 * with using rsp_scratch:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
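
	/*
	 * Orientation note (summary derived from the pushes above, not
	 * part of the original source): the frame now lines up with
	 * struct pt_regs from the top down: ss, sp, flags (from r11),
	 * cs, ip (from rcx), orig_ax, then di, si, dx, cx, ax (-ENOSYS),
	 * r8-r11, and finally six unwritten slots for bp, bx and r12-15.
	 */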

	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	tracesys
entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incompletely filled pt_regs.
 */
	LOCKDEP_SYS_EXIT
	/*
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * We must check ti flags with interrupts (or at least preemption)
	 * off because we must *never* return to userspace without
	 * processing exit work that is enqueued if we're preempted here.
	 * In particular, returning to userspace with any of the one-shot
	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
	 * very bad.
	 */
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */

	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	movq	RSP(%rsp), %rsp
	/*
	 * 64-bit SYSRET restores rip from rcx,
	 * rflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * Restoration of rflags re-enables interrupts.
	 *
	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
	 * descriptor is not reinitialized. This means that we should
	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
	 * exit the kernel, and re-enter using an interrupt vector. (All
	 * interrupt entries on x86_64 set SS to NULL.) We prevent that
	 * from happening by reloading SS in __switch_to. (Actually
	 * detecting the failure in 64-bit userspace is tricky but can be
	 * done.)
	 */
	USERGS_SYSRET64

/* Do syscall entry tracing */
tracesys:
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	call	syscall_trace_enter_phase1
	test	%rax, %rax
	jnz	tracesys_phase2			/* if needed, run the slow path */
	RESTORE_C_REGS_EXCEPT_RAX		/* else restore clobbered regs */
	movq	ORIG_RAX(%rsp), %rax
	jmp	entry_SYSCALL_64_fastpath	/* and return to the fast path */

tracesys_phase2:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	movq	%rax, %rdx
	call	syscall_trace_enter_phase2

	/*
	 * Reload registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter_phase2() returned
	 * the value it wants us to use in the table lookup.
	 */
	RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx			/* fixup for C */
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
/* Use IRET because user could have changed pt_regs->foo */

/*
 * Syscall return path ending with IRET.
 * Has correct iret frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
int_ret_from_sys_call_irqs_off:	/* jumps come here from the irqs-off SYSRET path */
	TRACE_IRQS_OFF
	movl	$_TIF_ALLWORK_MASK, %edi
	/* edi: mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl	TI_flags(%rcx), %edx
	andl	%edi, %edx
	jnz	int_careful
	andl	$~TS_COMPAT, TI_status(%rcx)
	jmp	syscall_return

	/*
	 * Either reschedule or signal or syscall exit tracking needed.
	 * First do a reschedule test.
	 * edx: work, edi: workmask
	 */
int_careful:
	bt	$TIF_NEED_RESCHED, %edx
	jnc	int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	SCHEDULE_USER
	popq	%rdi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

	/* handle signals and tracing -- both require a full pt_regs */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	/* Check for syscall exit trace */
	testl	$_TIF_WORK_SYSCALL_EXIT, %edx
	jz	int_signal
	pushq	%rdi
	leaq	8(%rsp), %rdi			/* &ptregs -> arg1 */
	call	syscall_trace_leave
	popq	%rdi
	andl	$~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi
	jmp	int_restore_rest

int_signal:
	testl	$_TIF_DO_NOTIFY_MASK, %edx
	jz	1f
	movq	%rsp, %rdi			/* &ptregs -> arg1 */
	xorl	%esi, %esi			/* oldset -> arg2 */
	call	do_notify_resume
1:	movl	$_TIF_WORK_MASK, %edi
int_restore_rest:
	RESTORE_EXTRA_REGS
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

syscall_return:
	/* The IRETQ could re-enable interrupts: */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
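
	/*
	 * Worked example (illustrative, not from the original source),
	 * with __VIRTUAL_MASK_SHIFT == 47 so both shift counts are 16:
	 * a non-canonical rcx of 0x0000800000000000 becomes
	 * 0xffff800000000000 after the shl+sar round trip, so the cmpq
	 * below sees a mismatch and we fall back to IRET. A canonical
	 * address is unchanged by the round trip.
	 */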

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
	 * restoring TF results in a trap from userspace immediately after
	 * SYSRET. This would cause an infinite loop whenever #DB happens
	 * with register state that satisfies the opportunistic SYSRET
	 * conditions. For example, single-stepping this user code:
	 *
	 *	movq	$stuck_here, %rcx
	 *	pushfq
	 *	popq	%r11
	 * stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)


.macro FORK_LIKE func
ENTRY(stub_\func)
	SAVE_EXTRA_REGS 8
	jmp	sys_\func
END(stub_\func)
.endm
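
/*
 * For illustration (expansion worked out by hand, not in the original
 * source), "FORK_LIKE clone" produces:
 *
 * ENTRY(stub_clone)
 *	SAVE_EXTRA_REGS 8
 *	jmp	sys_clone
 * END(stub_clone)
 *
 * i.e. the callee-saved "extra" registers are stored into their
 * pt_regs slots (offset by 8 to skip the return address pushed by the
 * syscall-table call) before tail-calling the C implementation.
 */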

FORK_LIKE  clone
FORK_LIKE  fork
FORK_LIKE  vfork

ENTRY(stub_execve)
	call	sys_execve
return_from_execve:
	testl	%eax, %eax
	jz	1f
	/* exec failed, can use fast SYSRET code path in this case */
	ret
1:
	/* must use IRET code path (pt_regs->cs may have changed) */
	addq	$8, %rsp
	ZERO_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_execve)
/*
 * Remaining execve stubs are only 7 bytes long.
 * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
 */
	.align	8
GLOBAL(stub_execveat)
	call	sys_execveat
	jmp	return_from_execve
END(stub_execveat)

#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
	.align	8
GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve)
	call	compat_sys_execve
	jmp	return_from_execve
END(stub32_execve)
END(stub_x32_execve)
	.align	8
GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat)
	call	compat_sys_execveat
	jmp	return_from_execve
END(stub32_execveat)
END(stub_x32_execveat)
#endif

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	/*
	 * SAVE_EXTRA_REGS result is not normally needed:
	 * sigreturn overwrites all pt_regs->GPREGS.
	 * But sigreturn can fail (!), and there is no easy way to detect that.
	 * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
	 * we SAVE_EXTRA_REGS here.
	 */
	SAVE_EXTRA_REGS 8
	call	sys_rt_sigreturn
return_from_stub:
	addq	$8, %rsp
	RESTORE_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	SAVE_EXTRA_REGS 8
	call	sys32_x32_rt_sigreturn
	jmp	return_from_stub
END(stub_x32_rt_sigreturn)
#endif

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)

	LOCK ; btr $TIF_FORK, TI_flags(%r8)

	pushq	$0x0002
	popfq					/* reset kernel eflags */

	call	schedule_tail			/* rdi: 'prev' task parameter */

	RESTORE_EXTRA_REGS

	testb	$3, CS(%rsp)			/* from kernel_thread? */

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the 32-bit compat paths.
	 * Use IRET code path to return, since it can safely handle
	 * all of the above.
	 */
	jnz	int_ret_from_sys_call

	/*
	 * We came from kernel_thread
	 * nb: we depend on RESTORE_EXTRA_REGS above
	 */
	movq	%rbp, %rdi
	call	*%rbx
	movl	$0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
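
/*
 * Illustrative expansion (worked out by hand, not in the original
 * source) of the first stub, with FIRST_EXTERNAL_VECTOR == 0x20:
 *
 *	pushq	$(~0x20 + 0x80)		- i.e. $0x5f, a positive signed byte
 *	jmp	common_interrupt
 *
 * Encoding ~vector+0x80 keeps every push inside the two-byte
 * "push imm8" form, so each 7-byte stub fits its 8-byte block.
 */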

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	/*
	 * Since nothing in interrupt handling code touches r12...r15 members
	 * of "struct pt_regs", and since interrupts can nest, we can save
	 * four stack slots and simultaneously provide
	 * an unwind-friendly stack layout by saving "truncated" pt_regs
	 * exactly up to rbp slot, without these members.
	 */
	ALLOC_PT_GPREGS_ON_STACK -RBP
	SAVE_C_REGS -RBP
	/* this goes to 0(%rsp) for unwinder, not for saving the value: */
	SAVE_EXTRA_REGS_RBP -RBP

	leaq	-RBP(%rsp), %rdi		/* arg1 for \func (pointer to pt_regs) */

	testb	$3, CS-RBP(%rsp)
	jz	1f
	SWAPGS
1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rsi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rsi
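	/*
	 * Added commentary (assuming irq_count's usual -1 initial value):
	 * the incl sets ZF only on the first, outermost entry (-1 -> 0),
	 * so cmovzq switches to the per-CPU irq stack exactly once;
	 * nested interrupts keep running on the irq stack.
	 */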
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func
	.endm

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
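	/*
	 * Added commentary: this undoes the +0x80 bias from the stub, so
	 * 0(%rsp) is again ~vector. E.g. vector 0x20 was pushed as 0x5f;
	 * 0x5f - 0x80 = -33 = ~0x20, which the C handler can invert to
	 * recover the vector number.
	 */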
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsi
	/* return code expects complete pt_regs - adjust rsp accordingly: */
	leaq	-RBP(%rsi), %rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel
	/* Interrupt came from user space */
retint_user:
	GET_THREAD_INFO(%rcx)

	/* %rcx: thread info. Interrupts are off. */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK, %edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	TI_flags(%rcx), %edx
	andl	%edi, %edx
	jnz	retint_careful

retint_swapgs:					/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	SWAPGS
	jmp	restore_c_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

	/*
	 * At this label, code paths which return to kernel and to user,
	 * which come from interrupts/exceptions and from syscalls, merge.
	 */
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
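	/*
	 * Added commentary: selector bit 2 is the Table Indicator; if it
	 * is set, SS refers to the LDT, which is how a stack segment
	 * needing the espfix64 fixup is identified.
	 */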
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	pushq	%rax
	pushq	%rdi
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* RAX */
	movq	(2*8)(%rsp), %rax		/* RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* CS */
	movq	%rax, (2*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(6*8)(%rsp), %rax		/* SS */
	movq	%rax, (5*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* RSP */
	movq	%rax, (4*8)(%rdi)
	andl	$0xffff0000, %eax
	popq	%rdi
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	popq	%rax
	jmp	native_irq_return_iret
#endif

/* edi: workmask, edx: work */
retint_careful:
	bt	$TIF_NEED_RESCHED, %edx
	jnc	retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	SCHEDULE_USER
	popq	%rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK, %edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq	$-1, ORIG_RAX(%rsp)
	xorl	%esi, %esi			/* oldset */
	movq	%rsp, %rdi			/* &pt_regs */
	call	do_notify_resume
	RESTORE_EXTRA_REGS
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp	retint_with_reschedule

END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm
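
/*
 * Illustrative expansion (worked out by hand, not from the original
 * source): "apicinterrupt3 REBOOT_VECTOR reboot_interrupt
 * smp_reboot_interrupt" defines ENTRY(reboot_interrupt), which pushes
 * $~(REBOOT_VECTOR), runs the common "interrupt" prologue with
 * smp_reboot_interrupt as the C handler, and joins ret_from_intr.
 */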
Jacob Shin | 89b831e | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 755 | |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 756 | #ifdef CONFIG_TRACING |
| 757 | #define trace(sym) trace_##sym |
| 758 | #define smp_trace(sym) smp_trace_##sym |
| 759 | |
| 760 | .macro trace_apicinterrupt num sym |
| 761 | apicinterrupt3 \num trace(\sym) smp_trace(\sym) |
| 762 | .endm |
| 763 | #else |
| 764 | .macro trace_apicinterrupt num sym do_sym |
| 765 | .endm |
| 766 | #endif |
| 767 | |
| 768 | .macro apicinterrupt num sym do_sym |
| 769 | apicinterrupt3 \num \sym \do_sym |
| 770 | trace_apicinterrupt \num \sym |
| 771 | .endm |
| 772 | |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 773 | #ifdef CONFIG_SMP |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 774 | apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
| 775 | apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 776 | #endif |
| 777 | |
Nick Piggin | 03b4863 | 2009-01-20 04:36:04 +0100 | [diff] [blame] | 778 | #ifdef CONFIG_X86_UV |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 779 | apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt |
Nick Piggin | 03b4863 | 2009-01-20 04:36:04 +0100 | [diff] [blame] | 780 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 781 | |
| 782 | apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt |
| 783 | apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 784 | |
Yang Zhang | d78f266 | 2013-04-11 19:25:11 +0800 | [diff] [blame] | 785 | #ifdef CONFIG_HAVE_KVM |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 786 | apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi |
| 787 | apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi |
Yang Zhang | d78f266 | 2013-04-11 19:25:11 +0800 | [diff] [blame] | 788 | #endif |
| 789 | |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 790 | #ifdef CONFIG_X86_MCE_THRESHOLD |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 791 | apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 792 | #endif |
| 793 | |
Aravind Gopalakrishnan | 24fd78a | 2015-05-06 06:58:56 -0500 | [diff] [blame] | 794 | #ifdef CONFIG_X86_MCE_AMD |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 795 | apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt |
Aravind Gopalakrishnan | 24fd78a | 2015-05-06 06:58:56 -0500 | [diff] [blame] | 796 | #endif |
| 797 | |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 798 | #ifdef CONFIG_X86_THERMAL_VECTOR |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 799 | apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 800 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 801 | |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 802 | #ifdef CONFIG_SMP |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 803 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt |
| 804 | apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt |
| 805 | apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 806 | #endif |
| 807 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 808 | apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt |
| 809 | apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 810 | |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 811 | #ifdef CONFIG_IRQ_WORK |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 812 | apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 813 | #endif |
| 814 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 815 | /* |
| 816 | * Exception entry points. |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 817 | */ |
Andy Lutomirski | 9b47668 | 2015-03-05 19:19:07 -0800 | [diff] [blame] | 818 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8) |
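/*
 * CPU_TSS_IST(x) is the per-cpu address of cpu_tss.x86_tss.ist[x - 1],
 * the hardware IST slot for stack x. The shift_ist option of idtentry
 * (below) moves that slot down by EXCEPTION_STKSZ around the handler
 * call, so a recursive exception on the same vector gets a fresh stack
 * instead of clobbering the live one.
 */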
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 819 | |
| 820 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 821 | ENTRY(\sym) |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 822 | /* Sanity check */ |
| 823 | .if \shift_ist != -1 && \paranoid == 0 |
| 824 | .error "using shift_ist requires paranoid=1" |
| 825 | .endif |
| 826 | |
Jan Beulich | ee4eb87 | 2012-11-02 11:18:39 +0000 | [diff] [blame] | 827 | ASM_CLAC |
Alexander van Heukelum | b8b1d08 | 2008-11-21 16:44:28 +0100 | [diff] [blame] | 828 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 829 | |
| 830 | .ifeq \has_error_code |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 831 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 832 | .endif |
| 833 | |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 834 | ALLOC_PT_GPREGS_ON_STACK |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 835 | |
| 836 | .if \paranoid |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 837 | .if \paranoid == 1 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 838 | testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ |
| 839 | jnz 1f |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 840 | .endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 841 | call paranoid_entry |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 842 | .else |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 843 | call error_entry |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 844 | .endif |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 845 | /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 846 | |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 847 | .if \paranoid |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 848 | .if \shift_ist != -1 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 849 | TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 850 | .else |
Alexander van Heukelum | b8b1d08 | 2008-11-21 16:44:28 +0100 | [diff] [blame] | 851 | TRACE_IRQS_OFF |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 852 | .endif |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 853 | .endif |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 854 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 855 | movq %rsp, %rdi /* pt_regs pointer */ |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 856 | |
| 857 | .if \has_error_code |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 858 | movq ORIG_RAX(%rsp), %rsi /* get error code */ |
| 859 | movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 860 | .else |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 861 | xorl %esi, %esi /* no error code */ |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 862 | .endif |
| 863 | |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 864 | .if \shift_ist != -1 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 865 | subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 866 | .endif |
| 867 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 868 | call \do_sym |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 869 | |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 870 | .if \shift_ist != -1 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 871 | addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 872 | .endif |
| 873 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 874 | /* these procedures expect "no swapgs" flag in ebx */ |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 875 | .if \paranoid |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 876 | jmp paranoid_exit |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 877 | .else |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 878 | jmp error_exit |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 879 | .endif |
| 880 | |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 881 | .if \paranoid == 1 |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 882 | /* |
| 883 | * Paranoid entry from userspace. Switch stacks and treat it |
| 884 | * as a normal entry. This means that paranoid handlers |
| 885 | * run in real process context if user_mode(regs). |
| 886 | */ |
| 887 | 1: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 888 | call error_entry |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 889 | |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 890 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 891 | movq %rsp, %rdi /* pt_regs pointer */ |
| 892 | call sync_regs |
| 893 | movq %rax, %rsp /* switch stack */ |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 894 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 895 | movq %rsp, %rdi /* pt_regs pointer */ |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 896 | |
| 897 | .if \has_error_code |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 898 | movq ORIG_RAX(%rsp), %rsi /* get error code */ |
| 899 | movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 900 | .else |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 901 | xorl %esi, %esi /* no error code */ |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 902 | .endif |
| 903 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 904 | call \do_sym |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 905 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 906 | jmp error_exit /* %ebx: no swapgs flag */ |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 907 | .endif |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 908 | END(\sym) |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 909 | .endm |
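/*
 * Worked example (a sketch of the expansion): with the defaults
 * paranoid=0 and shift_ist=-1, "idtentry overflow do_overflow
 * has_error_code=0" assembles to roughly:
 *
 *	ENTRY(overflow)
 *		ASM_CLAC
 *		PARAVIRT_ADJUST_EXCEPTION_FRAME
 *		pushq	$-1			# ORIG_RAX: no syscall to restart
 *		ALLOC_PT_GPREGS_ON_STACK
 *		call	error_entry		# save regs, SWAPGS if from user
 *		movq	%rsp, %rdi		# pt_regs pointer
 *		xorl	%esi, %esi		# no error code
 *		call	do_overflow
 *		jmp	error_exit		# ebx: "return to kernel" flag
 *	END(overflow)
 */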
Alexander van Heukelum | b8b1d08 | 2008-11-21 16:44:28 +0100 | [diff] [blame] | 910 | |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 911 | #ifdef CONFIG_TRACING |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 912 | .macro trace_idtentry sym do_sym has_error_code:req |
| 913 | idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code |
| 914 | idtentry \sym \do_sym has_error_code=\has_error_code |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 915 | .endm |
| 916 | #else |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 917 | .macro trace_idtentry sym do_sym has_error_code:req |
| 918 | idtentry \sym \do_sym has_error_code=\has_error_code |
Seiji Aguchi | 25c74b1 | 2013-10-30 16:37:00 -0400 | [diff] [blame] | 919 | .endm |
| 920 | #endif |
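/*
 * For example, "trace_idtentry page_fault do_page_fault has_error_code=1"
 * (used below) always emits the plain page_fault entry and, under
 * CONFIG_TRACING, an additional trace_page_fault entry that calls
 * trace_do_page_fault.
 */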
| 921 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 922 | idtentry divide_error do_divide_error has_error_code=0 |
| 923 | idtentry overflow do_overflow has_error_code=0 |
| 924 | idtentry bounds do_bounds has_error_code=0 |
| 925 | idtentry invalid_op do_invalid_op has_error_code=0 |
| 926 | idtentry device_not_available do_device_not_available has_error_code=0 |
| 927 | idtentry double_fault do_double_fault has_error_code=1 paranoid=2 |
| 928 | idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 |
| 929 | idtentry invalid_TSS do_invalid_TSS has_error_code=1 |
| 930 | idtentry segment_not_present do_segment_not_present has_error_code=1 |
| 931 | idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0 |
| 932 | idtentry coprocessor_error do_coprocessor_error has_error_code=0 |
| 933 | idtentry alignment_check do_alignment_check has_error_code=1 |
| 934 | idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 |
Andy Lutomirski | 5cec93c | 2011-06-05 13:50:24 -0400 | [diff] [blame] | 935 | |
Ingo Molnar | 2601e64 | 2006-07-03 00:24:45 -0700 | [diff] [blame] | 936 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 937 | /* |
| 938 | * Reload gs selector with exception handling |
| 939 | * edi: new selector |
| 940 | */ |
Jeremy Fitzhardinge | 9f9d489 | 2008-06-25 00:19:32 -0400 | [diff] [blame] | 941 | ENTRY(native_load_gs_index) |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 942 | pushfq |
Jeremy Fitzhardinge | b8aa287 | 2009-01-28 14:35:03 -0800 | [diff] [blame] | 943 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 944 | SWAPGS |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 945 | gs_change: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 946 | movl %edi, %gs |
| 947 | 2: mfence /* workaround */ |
Glauber de Oliveira Costa | 72fe485 | 2008-01-30 13:32:08 +0100 | [diff] [blame] | 948 | SWAPGS |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 949 | popfq |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 950 | ret |
Alexander van Heukelum | 6efdcfa | 2008-11-23 10:15:32 +0100 | [diff] [blame] | 951 | END(native_load_gs_index) |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 952 | |
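/*
 * If the selector load at gs_change faults, the exception table entry
 * below redirects the fault to bad_gs, which falls back to loading the
 * null selector (a zero selector can always be loaded into %gs).
 */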
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 953 | _ASM_EXTABLE(gs_change, bad_gs) |
| 954 | .section .fixup, "ax" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | /* running with kernelgs */ |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 956 | bad_gs: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 957 | SWAPGS /* switch back to user gs */ |
| 958 | xorl %eax, %eax |
| 959 | movl %eax, %gs |
| 960 | jmp 2b |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 961 | .previous |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 962 | |
Andi Kleen | 2699500 | 2006-08-02 22:37:28 +0200 | [diff] [blame] | 963 | /* Call softirq on interrupt stack. Interrupts are off. */ |
Frederic Weisbecker | 7d65f4a | 2013-09-05 15:49:45 +0200 | [diff] [blame] | 964 | ENTRY(do_softirq_own_stack) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 965 | pushq %rbp |
| 966 | 	movq	%rsp, %rbp
| 967 | 	incl	PER_CPU_VAR(irq_count)			/* irq_count is -1 while off the irq stack */
| 968 | 	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp	/* ZF set => first entry: switch stacks */
| 969 | 	pushq	%rbp			/* frame pointer backlink */
| 970 | call __do_softirq |
Andi Kleen | 2699500 | 2006-08-02 22:37:28 +0200 | [diff] [blame] | 971 | leaveq |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 972 | decl PER_CPU_VAR(irq_count) |
Andi Kleen | ed6b676 | 2005-07-28 21:15:49 -0700 | [diff] [blame] | 973 | ret |
Frederic Weisbecker | 7d65f4a | 2013-09-05 15:49:45 +0200 | [diff] [blame] | 974 | END(do_softirq_own_stack) |
Andi Kleen | 75154f4 | 2007-06-23 02:29:25 +0200 | [diff] [blame] | 975 | |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 976 | #ifdef CONFIG_XEN |
Andy Lutomirski | cb5dd2c | 2014-05-21 15:07:08 -0700 | [diff] [blame] | 977 | idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 978 | |
| 979 | /* |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 980 | * A note on the "critical region" in our callback handler. |
| 981 | * We want to avoid stacking callback handlers due to events occurring |
| 982 | * during handling of the last event. To do this, we keep events disabled |
| 983 | * until we've done all processing. HOWEVER, we must enable events before |
| 984 | * popping the stack frame (can't be done atomically) and so it would still |
| 985 | * be possible to get enough handler activations to overflow the stack. |
| 986 | * Although unlikely, bugs of that kind are hard to track down, so we'd |
| 987 | * like to avoid the possibility. |
| 988 | * So, on entry to the handler we detect whether we interrupted an |
| 989 | * existing activation in its critical region -- if so, we pop the current |
| 990 | * activation and restart the handler using the previous one. |
| 991 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 992 | ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct pt_regs *) */
| 993 | |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 994 | /* |
| 995 | 	 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
| 996 | 	 * will see the correct pointer to the pt_regs
| 997 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 998 | movq %rdi, %rsp /* we don't return, adjust the stack frame */ |
| 999 | 11: incl PER_CPU_VAR(irq_count) |
| 1000 | movq %rsp, %rbp |
| 1001 | cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp |
| 1002 | pushq %rbp /* frame pointer backlink */ |
| 1003 | call xen_evtchn_do_upcall |
| 1004 | popq %rsp |
| 1005 | decl PER_CPU_VAR(irq_count) |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 1006 | #ifndef CONFIG_PREEMPT |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1007 | call xen_maybe_preempt_hcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 1008 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1009 | jmp error_exit |
Alexander van Heukelum | 371c394 | 2011-03-11 21:59:38 +0100 | [diff] [blame] | 1010 | END(xen_do_hypervisor_callback) |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1011 | |
| 1012 | /* |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 1013 | * Hypervisor uses this for application faults while it executes. |
| 1014 | * We get here for two reasons: |
| 1015 | * 1. Fault while reloading DS, ES, FS or GS |
| 1016 | * 2. Fault while executing IRET |
| 1017 | * Category 1 we do not need to fix up as Xen has already reloaded all segment |
| 1018 | * registers that could be reloaded and zeroed the others. |
| 1019 | * Category 2 we fix up by killing the current process. We cannot use the |
| 1020 | * normal Linux return path in this case because if we use the IRET hypercall |
| 1021 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
| 1022 | * We distinguish between categories by comparing each saved segment register |
| 1023 |  * with its current contents: any discrepancy means we are in category 1.
| 1024 | */ |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1025 | ENTRY(xen_failsafe_callback) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1026 | movl %ds, %ecx |
| 1027 | cmpw %cx, 0x10(%rsp) |
| 1028 | jne 1f |
| 1029 | movl %es, %ecx |
| 1030 | cmpw %cx, 0x18(%rsp) |
| 1031 | jne 1f |
| 1032 | movl %fs, %ecx |
| 1033 | cmpw %cx, 0x20(%rsp) |
| 1034 | jne 1f |
| 1035 | movl %gs, %ecx |
| 1036 | cmpw %cx, 0x28(%rsp) |
| 1037 | jne 1f |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1038 | /* All segments match their saved values => Category 2 (Bad IRET). */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1039 | movq (%rsp), %rcx |
| 1040 | movq 8(%rsp), %r11 |
| 1041 | addq $0x30, %rsp |
| 1042 | pushq $0 /* RIP */ |
| 1043 | pushq %r11 |
| 1044 | pushq %rcx |
| 1045 | jmp general_protection |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1046 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1047 | movq (%rsp), %rcx |
| 1048 | movq 8(%rsp), %r11 |
| 1049 | addq $0x30, %rsp |
| 1050 | pushq $-1 /* orig_ax = -1 => not a system call */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1051 | ALLOC_PT_GPREGS_ON_STACK |
| 1052 | SAVE_C_REGS |
| 1053 | SAVE_EXTRA_REGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1054 | jmp error_exit |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1055 | END(xen_failsafe_callback) |
| 1056 | |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1057 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 1058 | xen_hvm_callback_vector xen_evtchn_do_upcall |
| 1059 | |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1060 | #endif /* CONFIG_XEN */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1061 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1062 | #if IS_ENABLED(CONFIG_HYPERV) |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1063 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1064 | hyperv_callback_vector hyperv_vector_handler |
| 1065 | #endif /* CONFIG_HYPERV */ |
| 1066 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1067 | idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
| 1068 | idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
| 1069 | idtentry stack_segment do_stack_segment has_error_code=1 |
| 1070 | |
Jeremy Fitzhardinge | 6cac5a9 | 2009-03-29 19:56:29 -0700 | [diff] [blame] | 1071 | #ifdef CONFIG_XEN |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1072 | idtentry xen_debug do_debug has_error_code=0 |
| 1073 | idtentry xen_int3 do_int3 has_error_code=0 |
| 1074 | idtentry xen_stack_segment do_stack_segment has_error_code=1 |
Jeremy Fitzhardinge | 6cac5a9 | 2009-03-29 19:56:29 -0700 | [diff] [blame] | 1075 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1076 | |
| 1077 | idtentry general_protection do_general_protection has_error_code=1 |
| 1078 | trace_idtentry page_fault do_page_fault has_error_code=1 |
| 1079 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1080 | #ifdef CONFIG_KVM_GUEST |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1081 | idtentry async_page_fault do_async_page_fault has_error_code=1 |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1082 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1083 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1084 | #ifdef CONFIG_X86_MCE |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1085 | idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1086 | #endif |
| 1087 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1088 | /* |
| 1089 | * Save all registers in pt_regs, and switch gs if needed. |
| 1090 |  * Use a slow but surefire "are we in kernel?" check (kernel gsbase is a kernel address, so the sign of MSR_GS_BASE's high half distinguishes the two).
| 1091 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise |
| 1092 | */ |
| 1093 | ENTRY(paranoid_entry) |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1094 | cld |
| 1095 | SAVE_C_REGS 8 |
| 1096 | SAVE_EXTRA_REGS 8 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1097 | movl $1, %ebx |
| 1098 | movl $MSR_GS_BASE, %ecx |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1099 | rdmsr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1100 | testl %edx, %edx |
| 1101 | js 1f /* negative -> in kernel */ |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1102 | SWAPGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1103 | xorl %ebx, %ebx |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1104 | 1: ret |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1105 | END(paranoid_entry) |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1106 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1107 | /* |
| 1108 | * "Paranoid" exit path from exception stack. This is invoked |
| 1109 | * only on return from non-NMI IST interrupts that came |
| 1110 | * from kernel space. |
| 1111 | * |
| 1112 | * We may be returning to very strange contexts (e.g. very early |
| 1113 | * in syscall entry), so checking for preemption here would |
| 1114 |  * be complicated. Fortunately, there's no good reason
| 1115 | * to try to handle preemption here. |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1116 | * |
| 1117 | * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1118 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1119 | ENTRY(paranoid_exit) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1120 | DISABLE_INTERRUPTS(CLBR_NONE) |
Steven Rostedt | 5963e31 | 2012-05-30 11:54:53 -0400 | [diff] [blame] | 1121 | TRACE_IRQS_OFF_DEBUG |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1122 | testl %ebx, %ebx /* swapgs needed? */ |
| 1123 | jnz paranoid_exit_no_swapgs |
Denys Vlasenko | f2db938 | 2015-02-26 14:40:30 -0800 | [diff] [blame] | 1124 | TRACE_IRQS_IRETQ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1125 | SWAPGS_UNSAFE_STACK |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1126 | jmp paranoid_exit_restore |
Denys Vlasenko | 0d55083 | 2015-02-26 14:40:29 -0800 | [diff] [blame] | 1127 | paranoid_exit_no_swapgs: |
Denys Vlasenko | f2db938 | 2015-02-26 14:40:30 -0800 | [diff] [blame] | 1128 | TRACE_IRQS_IRETQ_DEBUG |
Denys Vlasenko | 0d55083 | 2015-02-26 14:40:29 -0800 | [diff] [blame] | 1129 | paranoid_exit_restore: |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1130 | RESTORE_EXTRA_REGS |
| 1131 | RESTORE_C_REGS |
| 1132 | REMOVE_PT_GPREGS_FROM_STACK 8 |
Andy Lutomirski | 48e08d0 | 2014-11-11 12:49:41 -0800 | [diff] [blame] | 1133 | INTERRUPT_RETURN |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1134 | END(paranoid_exit) |
| 1135 | |
| 1136 | /* |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1137 | * Save all registers in pt_regs, and switch gs if needed. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1138 | * Return: EBX=0: came from user mode; EBX=1: otherwise |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1139 | */ |
| 1140 | ENTRY(error_entry) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1141 | cld |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1142 | SAVE_C_REGS 8 |
| 1143 | SAVE_EXTRA_REGS 8 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1144 | xorl %ebx, %ebx |
Denys Vlasenko | 03335e9 | 2015-04-27 15:21:52 +0200 | [diff] [blame] | 1145 | testb $3, CS+8(%rsp) |
Denys Vlasenko | dde74f2 | 2015-04-27 15:21:51 +0200 | [diff] [blame] | 1146 | jz error_kernelspace |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1147 | |
| 1148 | /* We entered from user mode */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1149 | SWAPGS |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1150 | |
| 1151 | error_entry_done: |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1152 | TRACE_IRQS_OFF |
| 1153 | ret |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1154 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1155 | /* |
| 1156 | * There are two places in the kernel that can potentially fault with |
| 1157 | * usergs. Handle them here. B stepping K8s sometimes report a |
| 1158 | * truncated RIP for IRET exceptions returning to compat mode. Check |
| 1159 | * for these here too. |
| 1160 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1161 | error_kernelspace: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1162 | incl %ebx |
| 1163 | leaq native_irq_return_iret(%rip), %rcx |
| 1164 | cmpq %rcx, RIP+8(%rsp) |
| 1165 | je error_bad_iret |
| 1166 | movl %ecx, %eax /* zero extend */ |
| 1167 | cmpq %rax, RIP+8(%rsp) |
| 1168 | je bstep_iret |
| 1169 | cmpq $gs_change, RIP+8(%rsp) |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1170 | jne error_entry_done |
| 1171 | |
| 1172 | /* |
| 1173 | * hack: gs_change can fail with user gsbase. If this happens, fix up |
| 1174 | * gsbase and proceed. We'll fix up the exception and land in |
| 1175 | * gs_change's error handler with kernel gsbase. |
| 1176 | */ |
| 1177 | SWAPGS |
| 1178 | jmp error_entry_done |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1179 | |
| 1180 | bstep_iret: |
| 1181 | /* Fix truncated RIP */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1182 | movq %rcx, RIP+8(%rsp) |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1183 | /* fall through */ |
| 1184 | |
| 1185 | error_bad_iret: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1186 | /* |
| 1187 | * We came from an IRET to user mode, so we have user gsbase. |
| 1188 | * Switch to kernel gsbase: |
| 1189 | */ |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1190 | SWAPGS |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1191 | |
| 1192 | /* |
| 1193 | * Pretend that the exception came from user mode: set up pt_regs |
| 1194 | * as if we faulted immediately after IRET and clear EBX so that |
| 1195 | * error_exit knows that we will be returning to user mode. |
| 1196 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1197 | mov %rsp, %rdi |
| 1198 | call fixup_bad_iret |
| 1199 | mov %rax, %rsp |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1200 | decl %ebx |
| 1201 | jmp error_entry_done |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1202 | END(error_entry) |
| 1203 | |
| 1204 | |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame^] | 1205 | /* |
| 1206 |  * On entry, EBX is a "return to kernel mode" flag:
| 1207 | * 1: already in kernel mode, don't need SWAPGS |
| 1208 | * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode |
| 1209 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1210 | ENTRY(error_exit) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1211 | movl %ebx, %eax |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1212 | RESTORE_EXTRA_REGS |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1213 | DISABLE_INTERRUPTS(CLBR_NONE) |
| 1214 | TRACE_IRQS_OFF |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1215 | testl %eax, %eax |
| 1216 | jnz retint_kernel |
| 1217 | jmp retint_user |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1218 | END(error_exit) |
| 1219 | |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1220 | /* Runs on exception stack */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1221 | ENTRY(nmi) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1222 | PARAVIRT_ADJUST_EXCEPTION_FRAME |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1223 | /* |
| 1224 | * We allow breakpoints in NMIs. If a breakpoint occurs, then |
| 1225 | * the iretq it performs will take us out of NMI context. |
| 1226 | * This means that we can have nested NMIs where the next |
| 1227 | * NMI is using the top of the stack of the previous NMI. We |
| 1228 | * can't let it execute because the nested NMI will corrupt the |
| 1229 | * stack of the previous NMI. NMI handlers are not re-entrant |
| 1230 | * anyway. |
| 1231 | * |
| 1232 | * To handle this case we do the following: |
| 1233 |  *    Check a special location on the stack that contains
| 1234 | * a variable that is set when NMIs are executing. |
| 1235 | * The interrupted task's stack is also checked to see if it |
| 1236 | * is an NMI stack. |
| 1237 | * If the variable is not set and the stack is not the NMI |
| 1238 | * stack then: |
| 1239 | * o Set the special variable on the stack |
| 1240 | * o Copy the interrupt frame into a "saved" location on the stack |
| 1241 | * o Copy the interrupt frame into a "copy" location on the stack |
| 1242 | * o Continue processing the NMI |
| 1243 | * If the variable is set or the previous stack is the NMI stack: |
| 1244 |  *    o Modify the "copy" location to jump to repeat_nmi
| 1245 |  *    o return to the first NMI
| 1246 | * |
| 1247 | * Now on exit of the first NMI, we first clear the stack variable |
| 1248 | * The NMI stack will tell any nested NMIs at that point that it is |
| 1249 | * nested. Then we pop the stack normally with iret, and if there was |
| 1250 | * a nested NMI that updated the copy interrupt stack frame, a |
| 1251 | * jump will be made to the repeat_nmi code that will handle the second |
| 1252 | * NMI. |
| 1253 | */ |
| 1254 | |
Denys Vlasenko | 146b2b0 | 2015-03-25 18:18:13 +0100 | [diff] [blame] | 1255 | /* Use %rdx as our temp variable throughout */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1256 | pushq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1257 | |
| 1258 | /* |
Steven Rostedt | 45d5a16 | 2012-02-19 16:43:37 -0500 | [diff] [blame] | 1259 | * If %cs was not the kernel segment, then the NMI triggered in user |
| 1260 | * space, which means it is definitely not nested. |
| 1261 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1262 | cmpl $__KERNEL_CS, 16(%rsp) |
| 1263 | jne first_nmi |
Steven Rostedt | 45d5a16 | 2012-02-19 16:43:37 -0500 | [diff] [blame] | 1264 | |
| 1265 | /* |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1266 | * Check the special variable on the stack to see if NMIs are |
| 1267 | * executing. |
| 1268 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1269 | cmpl $1, -8(%rsp) |
| 1270 | je nested_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1271 | |
| 1272 | /* |
| 1273 | * Now test if the previous stack was an NMI stack. |
| 1274 | * We need the double check. We check the NMI stack to satisfy the |
| 1275 | * race when the first NMI clears the variable before returning. |
| 1276 | * We check the variable because the first NMI could be in a |
| 1277 | * breakpoint routine using a breakpoint stack. |
| 1278 | */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1279 | lea 6*8(%rsp), %rdx |
| 1280 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ |
| 1281 | cmpq %rdx, 4*8(%rsp) |
| 1282 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ |
| 1283 | ja first_nmi |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1284 | |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1285 | subq $EXCEPTION_STKSZ, %rdx |
| 1286 | cmpq %rdx, 4*8(%rsp) |
| 1287 | /* If it is below the NMI stack, it is a normal NMI */ |
| 1288 | jb first_nmi |
| 1289 | /* Ah, it is within the NMI stack, treat it as nested */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1290 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1291 | nested_nmi: |
| 1292 | /* |
| 1293 | * Do nothing if we interrupted the fixup in repeat_nmi. |
| 1294 | * It's about to repeat the NMI handler, so we are fine |
| 1295 | * with ignoring this one. |
| 1296 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1297 | movq $repeat_nmi, %rdx |
| 1298 | cmpq 8(%rsp), %rdx |
| 1299 | ja 1f |
| 1300 | movq $end_repeat_nmi, %rdx |
| 1301 | cmpq 8(%rsp), %rdx |
| 1302 | ja nested_nmi_out |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1303 | |
| 1304 | 1: |
| 1305 | 	/* Set up the interrupted NMI's stack to jump to repeat_nmi */
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1306 | leaq -1*8(%rsp), %rdx |
| 1307 | movq %rdx, %rsp |
| 1308 | leaq -10*8(%rsp), %rdx |
| 1309 | pushq $__KERNEL_DS |
| 1310 | pushq %rdx |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1311 | pushfq |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1312 | pushq $__KERNEL_CS |
| 1313 | pushq $repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1314 | |
| 1315 | /* Put stack back */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1316 | addq $(6*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1317 | |
| 1318 | nested_nmi_out: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1319 | popq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1320 | |
| 1321 | /* No need to check faults here */ |
| 1322 | INTERRUPT_RETURN |
| 1323 | |
| 1324 | first_nmi: |
| 1325 | /* |
| 1326 | * Because nested NMIs will use the pushed location that we |
| 1327 | * stored in rdx, we must keep that space available. |
| 1328 | * Here's what our stack frame will look like: |
| 1329 | * +-------------------------+ |
| 1330 | * | original SS | |
| 1331 | * | original Return RSP | |
| 1332 | * | original RFLAGS | |
| 1333 | * | original CS | |
| 1334 | * | original RIP | |
| 1335 | * +-------------------------+ |
| 1336 | * | temp storage for rdx | |
| 1337 | * +-------------------------+ |
| 1338 | * | NMI executing variable | |
| 1339 | * +-------------------------+ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1340 | * | copied SS | |
| 1341 | * | copied Return RSP | |
| 1342 | * | copied RFLAGS | |
| 1343 | * | copied CS | |
| 1344 | * | copied RIP | |
| 1345 | * +-------------------------+ |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1346 | * | Saved SS | |
| 1347 | * | Saved Return RSP | |
| 1348 | * | Saved RFLAGS | |
| 1349 | * | Saved CS | |
| 1350 | * | Saved RIP | |
| 1351 | * +-------------------------+ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1352 | * | pt_regs | |
| 1353 | * +-------------------------+ |
| 1354 | * |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1355 | * The saved stack frame is used to fix up the copied stack frame |
| 1356 | * that a nested NMI may change to make the interrupted NMI iret jump |
| 1357 |  * to repeat_nmi. The original stack frame and the temp storage
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1358 |  * are also used by nested NMIs and cannot be trusted on exit.
| 1359 | */ |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1360 | /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1361 | movq (%rsp), %rdx |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1362 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1363 | /* Set the NMI executing variable on the stack. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1364 | pushq $1 |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1365 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1366 | /* Leave room for the "copied" frame */ |
| 1367 | subq $(5*8), %rsp |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1368 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1369 | /* Copy the stack frame to the Saved frame */ |
| 1370 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1371 | pushq 11*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1372 | .endr |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1373 | |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1374 | /* Everything up to here is safe from nested NMIs */ |
| 1375 | |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1376 | /* |
| 1377 | * If there was a nested NMI, the first NMI's iret will return |
| 1378 | * here. But NMIs are still enabled and we can take another |
| 1379 | * nested NMI. The nested NMI checks the interrupted RIP to see |
| 1380 | * if it is between repeat_nmi and end_repeat_nmi, and if so |
| 1381 | * it will just return, as we are about to repeat an NMI anyway. |
| 1382 | * This makes it safe to copy to the stack frame that a nested |
| 1383 | * NMI will update. |
| 1384 | */ |
| 1385 | repeat_nmi: |
| 1386 | /* |
| 1387 | * Update the stack variable to say we are still in NMI (the update |
| 1388 | * is benign for the non-repeat case, where 1 was pushed just above |
| 1389 | * to this very stack slot). |
| 1390 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1391 | movq $1, 10*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1392 | |
| 1393 | /* Make another copy, this one may be modified by nested NMIs */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1394 | addq $(10*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1395 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1396 | pushq -6*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1397 | .endr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1398 | subq $(5*8), %rsp |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1399 | end_repeat_nmi: |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1400 | |
| 1401 | /* |
| 1402 | * Everything below this point can be preempted by a nested |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1403 | * NMI if the first NMI took an exception and reset our iret stack |
| 1404 | * so that we repeat another NMI. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1405 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1406 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1407 | ALLOC_PT_GPREGS_ON_STACK |
| 1408 | |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1409 | /* |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1410 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1411 | 	 * as we should not be calling schedule in NMI context,
| 1412 | 	 * even with normal interrupts enabled. An NMI should not be
| 1413 | * setting NEED_RESCHED or anything that normal interrupts and |
| 1414 | * exceptions might do. |
| 1415 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1416 | call paranoid_entry |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1417 | |
| 1418 | /* |
| 1419 | * Save off the CR2 register. If we take a page fault in the NMI then |
| 1420 | * it could corrupt the CR2 value. If the NMI preempts a page fault |
| 1421 | * handler before it was able to read the CR2 register, and then the |
| 1422 | * NMI itself takes a page fault, the page fault that was preempted |
| 1423 | * will read the information from the NMI page fault and not the |
| 1424 | * origin fault. Save it off and restore it if it changes. |
| 1425 | 	 * original fault. Save it off and restore it if it changes.
| 1426 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1427 | movq %cr2, %r12 |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1428 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1429 | 	/* open-coded "paranoidentry do_nmi, 0" body, but without TRACE_IRQS_OFF */
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1430 | movq %rsp, %rdi |
| 1431 | movq $-1, %rsi |
| 1432 | call do_nmi |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1433 | |
| 1434 | /* Did the NMI take a page fault? Restore cr2 if it did */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1435 | movq %cr2, %rcx |
| 1436 | cmpq %rcx, %r12 |
| 1437 | je 1f |
| 1438 | movq %r12, %cr2 |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1439 | 1: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1440 | testl %ebx, %ebx /* swapgs needed? */ |
| 1441 | jnz nmi_restore |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1442 | nmi_swapgs: |
| 1443 | SWAPGS_UNSAFE_STACK |
| 1444 | nmi_restore: |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1445 | RESTORE_EXTRA_REGS |
| 1446 | RESTORE_C_REGS |
Jan Beulich | 444723d | 2013-01-24 09:27:31 +0000 | [diff] [blame] | 1447 | /* Pop the extra iret frame at once */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1448 | REMOVE_PT_GPREGS_FROM_STACK 6*8 |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1449 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1450 | /* Clear the NMI executing stack variable */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1451 | movq $0, 5*8(%rsp) |
Andy Lutomirski | 5ca6f70 | 2015-06-04 13:24:29 -0700 | [diff] [blame] | 1452 | INTERRUPT_RETURN |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1453 | END(nmi) |
| 1454 | |
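/*
 * ignore_sysret is installed as the SYSCALL target (MSR_CSTAR) when
 * 32-bit syscall emulation is not built in, so a stray 32-bit SYSCALL
 * fails cleanly with -ENOSYS. (The MSR wiring lives in the CPU setup
 * code, not here; this note describes the usual configuration.)
 */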
| 1455 | ENTRY(ignore_sysret) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1456 | mov $-ENOSYS, %eax |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1457 | sysret |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1458 | END(ignore_sysret) |