/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64			(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT			0x80000000
#define __AUDIT_ARCH_LE				0x40000000
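/*
 * These values mirror the __AUDIT_ARCH_* flags in include/uapi/linux/audit.h,
 * which this file avoids including: bit 31 marks a 64-bit ABI and bit 30 a
 * little-endian one.
 */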

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	/*
	 * Re-enable interrupts.
	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
	 * must execute atomically in the face of possible interrupt-driven
	 * task preemption. We must enable interrupts only after we're done
	 * with using rsp_scratch:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
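	/*
	 * The stack now holds a struct pt_regs, built top-down by the pushes
	 * above.  As an illustrative sketch of the layout (byte offsets from
	 * %rsp): r15..r12, bp, bx at 0x00-0x28 (allocated but not written),
	 * r11, r10, r9, r8 at 0x30-0x48, ax/cx/dx/si/di at 0x50-0x70, then
	 * orig_ax, ip, cs, flags, sp, ss at 0x78-0xa0.
	 */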

	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	tracesys
entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
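	/*
	 * The three instructions above are roughly the C expression
	 * "regs->ax = sys_call_table[nr](...)": r10 is copied into rcx first
	 * because the C ABI passes the fourth argument there, and the result
	 * lands in pt_regs->ax (a sketch, not a literal C equivalent).
	 */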
1:
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incompletely filled pt_regs.
 */
	LOCKDEP_SYS_EXIT
	/*
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * We must check ti flags with interrupts (or at least preemption)
	 * off because we must *never* return to userspace without
	 * processing exit work that is enqueued if we're preempted here.
	 * In particular, returning to userspace with any of the one-shot
	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
	 * very bad.
	 */
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */

	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	movq	RSP(%rsp), %rsp
	/*
	 * 64-bit SYSRET restores rip from rcx,
	 * rflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * Restoration of rflags re-enables interrupts.
	 *
	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
	 * descriptor is not reinitialized.  This means that we should
	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
	 * exit the kernel, and re-enter using an interrupt vector.  (All
	 * interrupt entries on x86_64 set SS to NULL.)  We prevent that
	 * from happening by reloading SS in __switch_to.  (Actually
	 * detecting the failure in 64-bit userspace is tricky but can be
	 * done.)
	 */
	USERGS_SYSRET64

GLOBAL(int_ret_from_sys_call_irqs_off)
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	jmp	int_ret_from_sys_call

	/* Do syscall entry tracing */
tracesys:
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	call	syscall_trace_enter_phase1
	test	%rax, %rax
	jnz	tracesys_phase2			/* if needed, run the slow path */
	RESTORE_C_REGS_EXCEPT_RAX		/* else restore clobbered regs */
	movq	ORIG_RAX(%rsp), %rax
	jmp	entry_SYSCALL_64_fastpath	/* and return to the fast path */

tracesys_phase2:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	movq	%rax, %rdx
	call	syscall_trace_enter_phase2

	/*
	 * Reload registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter_phase2() returned
	 * the value it wants us to use in the table lookup.
	 */
	RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx			/* fixup for C */
	call	*sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	/* Use IRET because user could have changed pt_regs->foo */

/*
 * Syscall return path ending with IRET.
 * Has correct iret frame.
 */
GLOBAL(int_ret_from_sys_call)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ		/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
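	/*
	 * Worked example with __VIRTUAL_MASK_SHIFT == 47 (shift count 16):
	 * the canonical address 0x00007fffffffffff survives the shl/sar
	 * round trip unchanged, while 0x0000800000000000 comes back as
	 * 0xffff800000000000 and fails the comparison below.
	 */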

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
	 * restoring TF results in a trap from userspace immediately after
	 * SYSRET.  This would cause an infinite loop whenever #DB happens
	 * with register state that satisfies the opportunistic SYSRET
	 * conditions.  For example, single-stepping this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)


	.macro FORK_LIKE func
ENTRY(stub_\func)
	SAVE_EXTRA_REGS 8
	jmp	sys_\func
END(stub_\func)
	.endm

	FORK_LIKE clone
	FORK_LIKE fork
	FORK_LIKE vfork

ENTRY(stub_execve)
	call	sys_execve
return_from_execve:
	testl	%eax, %eax
	jz	1f
	/* exec failed, can use fast SYSRET code path in this case */
	ret
1:
	/* must use IRET code path (pt_regs->cs may have changed) */
	addq	$8, %rsp
	ZERO_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_execve)
/*
 * Remaining execve stubs are only 7 bytes long.
 * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
 */
	.align	8
GLOBAL(stub_execveat)
	call	sys_execveat
	jmp	return_from_execve
END(stub_execveat)

#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
	.align	8
GLOBAL(stub_x32_execve)
GLOBAL(stub32_execve)
	call	compat_sys_execve
	jmp	return_from_execve
END(stub32_execve)
END(stub_x32_execve)
	.align	8
GLOBAL(stub_x32_execveat)
GLOBAL(stub32_execveat)
	call	compat_sys_execveat
	jmp	return_from_execve
END(stub32_execveat)
END(stub_x32_execveat)
#endif

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	/*
	 * SAVE_EXTRA_REGS result is not normally needed:
	 * sigreturn overwrites all pt_regs->GPREGS.
	 * But sigreturn can fail (!), and there is no easy way to detect that.
	 * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
	 * we SAVE_EXTRA_REGS here.
	 */
	SAVE_EXTRA_REGS 8
	call	sys_rt_sigreturn
return_from_stub:
	addq	$8, %rsp
	RESTORE_EXTRA_REGS
	movq	%rax, RAX(%rsp)
	jmp	int_ret_from_sys_call
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	SAVE_EXTRA_REGS 8
	call	sys32_x32_rt_sigreturn
	jmp	return_from_stub
END(stub_x32_rt_sigreturn)
#endif

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)

	LOCK ; btr $TIF_FORK, TI_flags(%r8)

	pushq	$0x0002
	popfq					/* reset kernel eflags */
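	/*
	 * 0x0002 is just the always-set reserved bit 1 of RFLAGS, so this
	 * push/popfq pair clears IF, TF, AC and friends in one go.
	 */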

	call	schedule_tail			/* rdi: 'prev' task parameter */

	RESTORE_EXTRA_REGS

	testb	$3, CS(%rsp)			/* from kernel_thread? */

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the 32-bit compat paths.
	 * Use IRET code path to return, since it can safely handle
	 * all of the above.
	 */
	jnz	int_ret_from_sys_call

	/*
	 * We came from kernel_thread
	 * nb: we depend on RESTORE_EXTRA_REGS above
	 */
	movq	%rbp, %rdi
	call	*%rbx
	movl	$0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.  Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS
#ifdef CONFIG_CONTEXT_TRACKING
	call	enter_from_user_mode
#endif

1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rdi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func	/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
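	/*
	 * A worked example of that encoding: ~vector+0x80 equals 127-vector,
	 * so vectors 0x20..0xff map into [-128, 95] and each push fits in a
	 * two-byte push-imm8.  common_interrupt then subtracts 0x80 again,
	 * leaving ~vector (i.e. -vector-1) in the [-256, -1] range, which
	 * do_IRQ undoes with another bitwise NOT.
	 */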
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
	LOCKDEP_SYS_EXIT_IRQ
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths which return to kernel and to user,
 * which come from interrupts/exception and from syscalls, merge.
 */
restore_regs_and_iret:
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	pushq	%rax
	pushq	%rdi
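	/*
	 * After the two pushes above, the frame from %rsp up is:
	 * rdi, rax, RIP, CS, RFLAGS, RSP, SS -- which is where the
	 * (2*8)..(6*8) offsets below come from.
	 */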
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* RAX */
	movq	(2*8)(%rsp), %rax		/* RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* CS */
	movq	%rax, (2*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(6*8)(%rsp), %rax		/* SS */
	movq	%rax, (5*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* RSP */
	movq	%rax, (4*8)(%rdi)
	andl	$0xffff0000, %eax
	popq	%rdi
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	popq	%rax
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

.macro apicinterrupt num sym do_sym
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
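/*
 * The hardware TSS numbers its IST slots 1..7 and stores each as an 8-byte
 * stack pointer, so ((x) - 1) * 8 converts the 1-based slot number into a
 * byte offset from TSS_ist within this CPU's TSS.
 */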

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry


	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi:  new selector
	 */
ENTRY(native_load_gs_index)
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl	%edi, %gs
2:	mfence					/* workaround */
	SWAPGS
	popfq
	ret
END(native_load_gs_index)

	_ASM_EXTABLE(gs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	incl	PER_CPU_VAR(irq_count)
	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
	push	%rbp				/* frame pointer backlink */
	call	__do_softirq
	leaveq
	decl	PER_CPU_VAR(irq_count)
	ret
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
896 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11:	incl	PER_CPU_VAR(irq_count)
	movq	%rsp, %rbp
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rbp				/* frame pointer backlink */
	call	xen_evtchn_do_upcall
	popq	%rsp
	decl	PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	pushq	%r11
	pushq	%rcx
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$-1 /* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xen_debug		do_debug		has_error_code=0
idtentry xen_int3		do_int3			has_error_code=0
idtentry xen_stack_segment	do_stack_segment	has_error_code=1
#endif

idtentry general_protection	do_general_protection	has_error_code=1
trace_idtentry page_fault	do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
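	/*
	 * rdmsr returns MSR_GS_BASE in edx:eax.  A kernel gsbase points at
	 * the kernel half of the address space, so in practice the sign bit
	 * (bit 31 of %edx) tells kernel and user gsbase apart.
	 */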
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
1016 * to try to handle preemption here.
Ingo Molnar4d732132015-06-08 20:43:07 +02001017 *
1018 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001019 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001020ENTRY(paranoid_exit)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001021 DISABLE_INTERRUPTS(CLBR_NONE)
Steven Rostedt5963e312012-05-30 11:54:53 -04001022 TRACE_IRQS_OFF_DEBUG
Ingo Molnar4d732132015-06-08 20:43:07 +02001023 testl %ebx, %ebx /* swapgs needed? */
1024 jnz paranoid_exit_no_swapgs
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001025 TRACE_IRQS_IRETQ
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001026 SWAPGS_UNSAFE_STACK
Ingo Molnar4d732132015-06-08 20:43:07 +02001027 jmp paranoid_exit_restore
Denys Vlasenko0d550832015-02-26 14:40:29 -08001028paranoid_exit_no_swapgs:
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001029 TRACE_IRQS_IRETQ_DEBUG
Denys Vlasenko0d550832015-02-26 14:40:29 -08001030paranoid_exit_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001031 RESTORE_EXTRA_REGS
1032 RESTORE_C_REGS
1033 REMOVE_PT_GPREGS_FROM_STACK 8
Andy Lutomirski48e08d02014-11-11 12:49:41 -08001034 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001035END(paranoid_exit)
1036
1037/*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001038 * Save all registers in pt_regs, and switch gs if needed.
Andy Lutomirski539f5112015-06-09 12:36:01 -07001039 * Return: EBX=0: came from user mode; EBX=1: otherwise
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001040 */
1041ENTRY(error_entry)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001042 cld
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001043 SAVE_C_REGS 8
1044 SAVE_EXTRA_REGS 8
Ingo Molnar4d732132015-06-08 20:43:07 +02001045 xorl %ebx, %ebx
Denys Vlasenko03335e92015-04-27 15:21:52 +02001046 testb $3, CS+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001047 jz .Lerror_kernelspace
Andy Lutomirski539f5112015-06-09 12:36:01 -07001048
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001049.Lerror_entry_from_usermode_swapgs:
1050 /*
1051 * We entered from user mode or we're pretending to have entered
1052 * from user mode due to an IRET fault.
1053 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001054 SWAPGS
Andy Lutomirski539f5112015-06-09 12:36:01 -07001055
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001056.Lerror_entry_from_usermode_after_swapgs:
Andy Lutomirski02bc7762015-07-03 12:44:31 -07001057#ifdef CONFIG_CONTEXT_TRACKING
1058 call enter_from_user_mode
1059#endif
1060
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001061.Lerror_entry_done:
Andy Lutomirski02bc7762015-07-03 12:44:31 -07001062
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001063 TRACE_IRQS_OFF
1064 ret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001065
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001066 /*
1067 * There are two places in the kernel that can potentially fault with
1068 * usergs. Handle them here. B-stepping K8s sometimes report a
1069 * truncated RIP for IRET exceptions returning to compat mode. Check
1070 * for these here too.
1071 */
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001072.Lerror_kernelspace:
Ingo Molnar4d732132015-06-08 20:43:07 +02001073 incl %ebx
1074 leaq native_irq_return_iret(%rip), %rcx
1075 cmpq %rcx, RIP+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001076 je .Lerror_bad_iret
Ingo Molnar4d732132015-06-08 20:43:07 +02001077 movl %ecx, %eax /* zero extend */
1078 cmpq %rax, RIP+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001079 je .Lbstep_iret
Ingo Molnar4d732132015-06-08 20:43:07 +02001080 cmpq $gs_change, RIP+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001081 jne .Lerror_entry_done
Andy Lutomirski539f5112015-06-09 12:36:01 -07001082
1083 /*
1084 * hack: gs_change can fail with user gsbase. If this happens, fix up
1085 * gsbase and proceed. We'll fix up the exception and land in
1086 * gs_change's error handler with kernel gsbase.
1087 */
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001088 jmp .Lerror_entry_from_usermode_swapgs
Brian Gerstae24ffe2009-10-12 10:18:23 -04001089
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001090.Lbstep_iret:
Brian Gerstae24ffe2009-10-12 10:18:23 -04001091 /* Fix truncated RIP */
Ingo Molnar4d732132015-06-08 20:43:07 +02001092 movq %rcx, RIP+8(%rsp)
Andy Lutomirskib645af22014-11-22 18:00:33 -08001093 /* fall through */
1094
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001095.Lerror_bad_iret:
Andy Lutomirski539f5112015-06-09 12:36:01 -07001096 /*
1097 * We came from an IRET to user mode, so we have user gsbase.
1098 * Switch to kernel gsbase:
1099 */
Andy Lutomirskib645af22014-11-22 18:00:33 -08001100 SWAPGS
Andy Lutomirski539f5112015-06-09 12:36:01 -07001101
1102 /*
1103 * Pretend that the exception came from user mode: set up pt_regs
1104 * as if we faulted immediately after IRET and clear EBX so that
1105 * error_exit knows that we will be returning to user mode.
1106 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001107 mov %rsp, %rdi
1108 call fixup_bad_iret
1109 mov %rax, %rsp
Andy Lutomirski539f5112015-06-09 12:36:01 -07001110 decl %ebx
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001111 jmp .Lerror_entry_from_usermode_after_swapgs
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001112END(error_entry)
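/*
 * A rough C sketch of the kernel-space triage above, for illustration
 * only ("rip" is the saved RIP at RIP+8(%rsp); labels match the asm):
 *
 *	ebx = 1;				// came from kernel mode
 *	if (rip == (unsigned long)native_irq_return_iret)
 *		goto error_bad_iret;		// the IRET itself faulted
 *	if (rip == (u32)(unsigned long)native_irq_return_iret)
 *		goto bstep_iret;		// truncated RIP, B-stepping K8
 *	if (rip != (unsigned long)gs_change)
 *		goto error_entry_done;		// ordinary kernel fault
 *	goto error_entry_from_usermode_swapgs;	// gs_change: fix up gsbase
 */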
1113
1114
Andy Lutomirski539f5112015-06-09 12:36:01 -07001115/*
1116 * On entry, EBS is a "return to kernel mode" flag:
1117 * 1: already in kernel mode, don't need SWAPGS
1118 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1119 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001120ENTRY(error_exit)
Ingo Molnar4d732132015-06-08 20:43:07 +02001121 movl %ebx, %eax
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001122 DISABLE_INTERRUPTS(CLBR_NONE)
1123 TRACE_IRQS_OFF
Ingo Molnar4d732132015-06-08 20:43:07 +02001124 testl %eax, %eax
1125 jnz retint_kernel
1126 jmp retint_user
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001127END(error_exit)
1128
Denys Vlasenko0784b362015-04-01 16:50:57 +02001129/* Runs on exception stack */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001130ENTRY(nmi)
Andy Lutomirskifc57a7c2015-09-20 16:32:04 -07001131 /*
1132 * Fix up the exception frame if we're on Xen.
1133 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
1134 * one value to the stack on native, so it may clobber the rdx
1135 * scratch slot, but it won't clobber any of the important
1136 * slots past it.
1137 *
1138 * Xen is a different story, because the Xen frame itself overlaps
1139 * the "NMI executing" variable.
1140 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001141 PARAVIRT_ADJUST_EXCEPTION_FRAME
Andy Lutomirskifc57a7c2015-09-20 16:32:04 -07001142
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001143 /*
1144 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1145 * the iretq it performs will take us out of NMI context.
1146 * This means that we can have nested NMIs where the next
1147 * NMI is using the top of the stack of the previous NMI. We
1148 * can't let it execute because the nested NMI will corrupt the
1149 * stack of the previous NMI. NMI handlers are not re-entrant
1150 * anyway.
1151 *
1152 * To handle this case we do the following:
1153 * Check a special location on the stack that contains
1154 * a variable that is set when NMIs are executing.
1155 * The interrupted task's stack is also checked to see if it
1156 * is an NMI stack.
1157 * If the variable is not set and the stack is not the NMI
1158 * stack then:
1159 * o Set the special variable on the stack
Andy Lutomirski0b229302015-07-15 10:29:36 -07001160 * o Copy the interrupt frame into an "outermost" location on the
1161 * stack
1162 * o Copy the interrupt frame into an "iret" location on the stack
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001163 * o Continue processing the NMI
1164 * If the variable is set or the previous stack is the NMI stack:
Andy Lutomirski0b229302015-07-15 10:29:36 -07001165 * o Modify the "iret" location to jump to repeat_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001166 * o Return to the first NMI
1167 *
1168 * Now, on exit of the first NMI, we first clear the stack variable.
1169 * The NMI stack will tell any nested NMIs at that point that they are
1170 * nested. Then we pop the stack normally with iret, and if there was
1171 * a nested NMI that updated the copied interrupt stack frame, a
1172 * jump will be made to the repeat_nmi code that will handle the second
1173 * NMI.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001174 *
1175 * However, espfix prevents us from directly returning to userspace
1176 * with a single IRET instruction. Similarly, IRET to user mode
1177 * can fault. We therefore handle NMIs from user space like
1178 * other IST entries.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001179 */
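/*
 * Illustration only: the protocol above as rough pseudocode; the
 * real state lives in the stack slots drawn under .Lnmi_from_kernel
 * below:
 *
 *	if (!nmi_executing && !on_nmi_stack(prev_rsp)) {
 *		nmi_executing = 1;
 *		outermost_frame = hw_frame;	// final return target
 *		iret_frame = outermost_frame;
 *		handle_nmi();			// may loop via repeat_nmi
 *	} else {
 *		iret_frame.rip = repeat_nmi;	// force another iteration
 *		return;				// resume the outer NMI
 *	}
 */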
1180
Denys Vlasenko146b2b02015-03-25 18:18:13 +01001181 /* Use %rdx as our temp variable throughout */
Ingo Molnar4d732132015-06-08 20:43:07 +02001182 pushq %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001183
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001184 testb $3, CS-RIP+8(%rsp)
1185 jz .Lnmi_from_kernel
Steven Rostedt45d5a162012-02-19 16:43:37 -05001186
1187 /*
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001188 * NMI from user mode. We need to run on the thread stack, but we
1189 * can't go through the normal entry paths: NMIs are masked, and
1190 * we don't want to enable interrupts, because then we'll end
1191 * up in an awkward situation in which IRQs are on but NMIs
1192 * are off.
Andy Lutomirski83c133c2015-09-20 16:32:05 -07001193 *
1194 * We also must not push anything to the stack before switching
1195 * stacks lest we corrupt the "NMI executing" variable.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001196 */
1197
Andy Lutomirski83c133c2015-09-20 16:32:05 -07001198 SWAPGS_UNSAFE_STACK
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001199 cld
1200 movq %rsp, %rdx
1201 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1202 pushq 5*8(%rdx) /* pt_regs->ss */
1203 pushq 4*8(%rdx) /* pt_regs->rsp */
1204 pushq 3*8(%rdx) /* pt_regs->flags */
1205 pushq 2*8(%rdx) /* pt_regs->cs */
1206 pushq 1*8(%rdx) /* pt_regs->rip */
1207 pushq $-1 /* pt_regs->orig_ax */
1208 pushq %rdi /* pt_regs->di */
1209 pushq %rsi /* pt_regs->si */
1210 pushq (%rdx) /* pt_regs->dx */
1211 pushq %rcx /* pt_regs->cx */
1212 pushq %rax /* pt_regs->ax */
1213 pushq %r8 /* pt_regs->r8 */
1214 pushq %r9 /* pt_regs->r9 */
1215 pushq %r10 /* pt_regs->r10 */
1216 pushq %r11 /* pt_regs->r11 */
1217 pushq %rbx /* pt_regs->rbx */
1218 pushq %rbp /* pt_regs->rbp */
1219 pushq %r12 /* pt_regs->r12 */
1220 pushq %r13 /* pt_regs->r13 */
1221 pushq %r14 /* pt_regs->r14 */
1222 pushq %r15 /* pt_regs->r15 */
1223
1224 /*
1225 * At this point we no longer need to worry about stack damage
1226 * due to nesting -- we're on the normal thread stack and we're
1227 * done with the NMI stack.
1228 */
1229
1230 movq %rsp, %rdi
1231 movq $-1, %rsi
1232 call do_nmi
1233
1234 /*
1235 * Return back to user mode. We must *not* do the normal exit
1236 * work, because we don't want to enable interrupts. Fortunately,
1237 * do_nmi doesn't modify pt_regs.
1238 */
1239 SWAPGS
1240 jmp restore_c_regs_and_iret
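	/*
	 * Illustration only: the user-mode path above, in rough C
	 * ("hw_frame" is the five-word frame the CPU pushed on the
	 * NMI IST stack; the push helpers are made-up names):
	 *
	 *	swapgs();
	 *	rsp = this_cpu_read(cpu_current_top_of_stack);
	 *	push_iret_frame(hw_frame);	// ss, rsp, rflags, cs, rip
	 *	push(-1);			// pt_regs->orig_ax
	 *	push_gprs();			// the rest of pt_regs
	 *	do_nmi(regs, -1);
	 *	swapgs();
	 *	iret();				// skip the normal exit work
	 */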
1241
1242.Lnmi_from_kernel:
1243 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001244 * Here's what our stack frame will look like:
1245 * +---------------------------------------------------------+
1246 * | original SS |
1247 * | original Return RSP |
1248 * | original RFLAGS |
1249 * | original CS |
1250 * | original RIP |
1251 * +---------------------------------------------------------+
1252 * | temp storage for rdx |
1253 * +---------------------------------------------------------+
1254 * | "NMI executing" variable |
1255 * +---------------------------------------------------------+
1256 * | iret SS } Copied from "outermost" frame |
1257 * | iret Return RSP } on each loop iteration; overwritten |
1258 * | iret RFLAGS } by a nested NMI to force another |
1259 * | iret CS } iteration if needed. |
1260 * | iret RIP } |
1261 * +---------------------------------------------------------+
1262 * | outermost SS } initialized in first_nmi; |
1263 * | outermost Return RSP } will not be changed before |
1264 * | outermost RFLAGS } NMI processing is done. |
1265 * | outermost CS } Copied to "iret" frame on each |
1266 * | outermost RIP } iteration. |
1267 * +---------------------------------------------------------+
1268 * | pt_regs |
1269 * +---------------------------------------------------------+
1270 *
1271 * The "original" frame is used by hardware. Before re-enabling
1272 * NMIs, we need to be done with it, and we need to leave enough
1273 * space for the asm code here.
1274 *
1275 * We return by executing IRET while RSP points to the "iret" frame.
1276 * That will either return for real or it will loop back into NMI
1277 * processing.
1278 *
1279 * The "outermost" frame is copied to the "iret" frame on each
1280 * iteration of the loop, so each iteration starts with the "iret"
1281 * frame pointing to the final return target.
1282 */
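	/*
	 * Illustration only: the same layout as a rough C struct,
	 * lowest address first (names invented for clarity):
	 *
	 *	struct nmi_stack {
	 *		struct pt_regs	regs;		// built later
	 *		unsigned long	outermost[5];	// RIP,CS,RFLAGS,RSP,SS
	 *		unsigned long	iret[5];	// what IRET consumes
	 *		unsigned long	nmi_executing;
	 *		unsigned long	saved_rdx;	// temp storage
	 *		unsigned long	original[5];	// pushed by hardware
	 *	};
	 */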
1283
1284 /*
1285 * Determine whether we're a nested NMI.
1286 *
Andy Lutomirskia27507c2015-07-15 10:29:37 -07001287 * If we interrupted kernel code between repeat_nmi and
1288 * end_repeat_nmi, then we are a nested NMI. We must not
1289 * modify the "iret" frame because it's being written by
1290 * the outer NMI. That's okay; the outer NMI handler is
1291 * about to call do_nmi anyway, so we can just
1292 * resume the outer NMI.
1293 */
1294
1295 movq $repeat_nmi, %rdx
1296 cmpq 8(%rsp), %rdx
1297 ja 1f
1298 movq $end_repeat_nmi, %rdx
1299 cmpq 8(%rsp), %rdx
1300 ja nested_nmi_out
13011:
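	/*
	 * Illustration only: the two compares above are, in rough C
	 * ("rip" is the interrupted RIP at 8(%rsp)):
	 *
	 *	if (rip >= (unsigned long)repeat_nmi &&
	 *	    rip <  (unsigned long)end_repeat_nmi)
	 *		goto nested_nmi_out;	// outer NMI repeats anyway
	 */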
1302
1303 /*
1304 * Now check "NMI executing". If it's set, then we're nested.
Andy Lutomirski0b229302015-07-15 10:29:36 -07001305 * This will not detect if we interrupted an outer NMI just
1306 * before IRET.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001307 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001308 cmpl $1, -8(%rsp)
1309 je nested_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001310
1311 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001312 * Now test if the previous stack was an NMI stack. This covers
1313 * the case where we interrupt an outer NMI after it clears
Andy Lutomirski810bc072015-07-15 10:29:38 -07001314 * "NMI executing" but before IRET. We need to be careful, though:
1315 * there is one case in which RSP could point to the NMI stack
1316 * despite there being no NMI active: naughty userspace controls
1317 * RSP at the very beginning of the SYSCALL targets. We can
1318 * pull a fast one on naughty userspace, though: we program
1319 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1320 * if it controls the kernel's RSP. We set DF before we clear
1321 * "NMI executing".
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001322 */
Denys Vlasenko0784b362015-04-01 16:50:57 +02001323 lea 6*8(%rsp), %rdx
1324 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1325 cmpq %rdx, 4*8(%rsp)
1326 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1327 ja first_nmi
Ingo Molnar4d732132015-06-08 20:43:07 +02001328
Denys Vlasenko0784b362015-04-01 16:50:57 +02001329 subq $EXCEPTION_STKSZ, %rdx
1330 cmpq %rdx, 4*8(%rsp)
1331 /* If it is below the NMI stack, it is a normal NMI */
1332 jb first_nmi
Andy Lutomirski810bc072015-07-15 10:29:38 -07001333
1334 /* Ah, it is within the NMI stack. */
1335
1336 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1337 jz first_nmi /* RSP was user controlled. */
1338
1339 /* This is a nested NMI. */
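	/*
	 * Illustration only: the stack-range and DF checks above, in
	 * rough C ("prev_rsp" and "prev_flags" come from the
	 * interrupted frame):
	 *
	 *	top = rsp + 6*8;	// top of this NMI IST stack
	 *	if (prev_rsp > top || prev_rsp < top - EXCEPTION_STKSZ)
	 *		goto first_nmi;	// not on the NMI stack at all
	 *	if (!(prev_flags & X86_EFLAGS_DF))
	 *		goto first_nmi;	// RSP was user controlled
	 *	goto nested_nmi;
	 */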
Denys Vlasenko0784b362015-04-01 16:50:57 +02001340
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001341nested_nmi:
1342 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001343 * Modify the "iret" frame to point to repeat_nmi, forcing another
1344 * iteration of NMI handling.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001345 */
Andy Lutomirski23a781e2015-07-15 10:29:39 -07001346 subq $8, %rsp
Ingo Molnar4d732132015-06-08 20:43:07 +02001347 leaq -10*8(%rsp), %rdx
1348 pushq $__KERNEL_DS
1349 pushq %rdx
Ingo Molnar131484c2015-05-28 12:21:47 +02001350 pushfq
Ingo Molnar4d732132015-06-08 20:43:07 +02001351 pushq $__KERNEL_CS
1352 pushq $repeat_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001353
1354 /* Put stack back */
Ingo Molnar4d732132015-06-08 20:43:07 +02001355 addq $(6*8), %rsp
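	/*
	 * Illustration only: the pushes above rewrite the "iret" slots
	 * so that the outer NMI's IRET lands in repeat_nmi, roughly:
	 *
	 *	iret_frame.ss     = __KERNEL_DS;
	 *	iret_frame.rsp    = &outermost_rip;	// see repeat_nmi
	 *	iret_frame.rflags = rflags;
	 *	iret_frame.cs     = __KERNEL_CS;
	 *	iret_frame.rip    = (unsigned long)repeat_nmi;
	 */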
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001356
1357nested_nmi_out:
Ingo Molnar4d732132015-06-08 20:43:07 +02001358 popq %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001359
Andy Lutomirski0b229302015-07-15 10:29:36 -07001360 /* We are returning to kernel mode, so this cannot result in a fault. */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001361 INTERRUPT_RETURN
1362
1363first_nmi:
Andy Lutomirski0b229302015-07-15 10:29:36 -07001364 /* Restore rdx. */
Ingo Molnar4d732132015-06-08 20:43:07 +02001365 movq (%rsp), %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001366
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001367 /* Make room for "NMI executing". */
1368 pushq $0
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001369
Andy Lutomirski0b229302015-07-15 10:29:36 -07001370 /* Leave room for the "iret" frame */
Ingo Molnar4d732132015-06-08 20:43:07 +02001371 subq $(5*8), %rsp
Salman Qazi28696f42012-10-01 17:29:25 -07001372
Andy Lutomirski0b229302015-07-15 10:29:36 -07001373 /* Copy the "original" frame to the "outermost" frame */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001374 .rept 5
Ingo Molnar4d732132015-06-08 20:43:07 +02001375 pushq 11*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001376 .endr
Jan Beulich62610912012-02-24 14:54:37 +00001377
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001378 /* Everything up to here is safe from nested NMIs */
1379
Andy Lutomirskia97439a2015-07-15 10:29:41 -07001380#ifdef CONFIG_DEBUG_ENTRY
1381 /*
1382 * For ease of testing, unmask NMIs right away. Disabled by
1383 * default because IRET is very expensive.
1384 */
1385 pushq $0 /* SS */
1386 pushq %rsp /* RSP (minus 8 because of the previous push) */
1387 addq $8, (%rsp) /* Fix up RSP */
1388 pushfq /* RFLAGS */
1389 pushq $__KERNEL_CS /* CS */
1390 pushq $1f /* RIP */
1391 INTERRUPT_RETURN /* continues at repeat_nmi below */
13921:
1393#endif
1394
Andy Lutomirski0b229302015-07-15 10:29:36 -07001395repeat_nmi:
Jan Beulich62610912012-02-24 14:54:37 +00001396 /*
1397 * If there was a nested NMI, the first NMI's iret will return
1398 * here. But NMIs are still enabled and we can take another
1399 * nested NMI. The nested NMI checks the interrupted RIP to see
1400 * if it is between repeat_nmi and end_repeat_nmi, and if so
1401 * it will just return, as we are about to repeat an NMI anyway.
1402 * This makes it safe to copy to the stack frame that a nested
1403 * NMI will update.
Andy Lutomirski0b229302015-07-15 10:29:36 -07001404 *
1405 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1406 * we're repeating an NMI, gsbase has the same value that it had on
1407 * the first iteration. paranoid_entry will load the kernel
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001408 * gsbase if needed before we call do_nmi. "NMI executing"
1409 * is zero.
Jan Beulich62610912012-02-24 14:54:37 +00001410 */
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001411 movq $1, 10*8(%rsp) /* Set "NMI executing". */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001412
Andy Lutomirski0b229302015-07-15 10:29:36 -07001413 /*
1414 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1415 * here must not modify the "iret" frame while we're writing to
1416 * it or it will end up containing garbage.
1417 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001418 addq $(10*8), %rsp
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001419 .rept 5
Ingo Molnar4d732132015-06-08 20:43:07 +02001420 pushq -6*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001421 .endr
Ingo Molnar4d732132015-06-08 20:43:07 +02001422 subq $(5*8), %rsp
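	/*
	 * Illustration only: the .rept loop above is effectively
	 *
	 *	memcpy(iret_frame, outermost_frame, 5*8);
	 *
	 * done at a RIP inside repeat_nmi..end_repeat_nmi, so a nested
	 * NMI arriving mid-copy leaves the "iret" frame alone (see the
	 * range check at the top of the handler).
	 */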
Jan Beulich62610912012-02-24 14:54:37 +00001423end_repeat_nmi:
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001424
1425 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001426 * Everything below this point can be preempted by a nested NMI.
1427 * If this happens, then the inner NMI will change the "iret"
1428 * frame to point back to repeat_nmi.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001429 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001430 pushq $-1 /* ORIG_RAX: no syscall to restart */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001431 ALLOC_PT_GPREGS_ON_STACK
1432
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001433 /*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001434 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001435 * as we should not be calling schedule in NMI context,
1436 * even with normal interrupts enabled. An NMI should not be
1437 * setting NEED_RESCHED or anything that normal interrupts and
1438 * exceptions might do.
1439 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001440 call paranoid_entry
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001441
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001442 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
Ingo Molnar4d732132015-06-08 20:43:07 +02001443 movq %rsp, %rdi
1444 movq $-1, %rsi
1445 call do_nmi
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001446
Ingo Molnar4d732132015-06-08 20:43:07 +02001447 testl %ebx, %ebx /* swapgs needed? */
1448 jnz nmi_restore
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001449nmi_swapgs:
1450 SWAPGS_UNSAFE_STACK
1451nmi_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001452 RESTORE_EXTRA_REGS
1453 RESTORE_C_REGS
Andy Lutomirski0b229302015-07-15 10:29:36 -07001454
1455 /* Point RSP at the "iret" frame. */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001456 REMOVE_PT_GPREGS_FROM_STACK 6*8
Salman Qazi28696f42012-10-01 17:29:25 -07001457
Andy Lutomirski810bc072015-07-15 10:29:38 -07001458 /*
1459 * Clear "NMI executing". Set DF first so that we can easily
1460 * distinguish the remaining code between here and IRET from
1461 * the SYSCALL entry and exit paths. On a native kernel, we
1462 * could just inspect RIP, but, on paravirt kernels,
1463 * INTERRUPT_RETURN can translate into a jump into a
1464 * hypercall page.
1465 */
1466 std
1467 movq $0, 5*8(%rsp) /* clear "NMI executing" */
Andy Lutomirski0b229302015-07-15 10:29:36 -07001468
1469 /*
1470 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1471 * stack in a single instruction. We are returning to kernel
1472 * mode, so this cannot result in a fault.
1473 */
Andy Lutomirski5ca6f702015-06-04 13:24:29 -07001474 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001475END(nmi)
1476
1477ENTRY(ignore_sysret)
Ingo Molnar4d732132015-06-08 20:43:07 +02001478 mov $-ENOSYS, %eax
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001479 sysret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001480END(ignore_sysret)