/*
 *  linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; that is
 * only done for syscall tracing, signals, or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers
 * are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
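
/*
 * For reference, the architecture-defined "top of stack" frame that the
 * CPU pushes on interrupts and exceptions (growing downward) is:
 *
 *	SS, RSP, RFLAGS, CS, RIP
 *
 * with the error code (or ORIG_RAX) sitting below RIP when present.
 */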

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64
	.section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CC_USING_FENTRY
# define function_hook	__fentry__
#else
# define function_hook	mcount
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(function_hook)
	retq
END(function_hook)

/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
	MCOUNT_SAVE_FRAME \skip

	/* Load the ftrace_ops into the 3rd parameter */
	movq function_trace_op(%rip), %rdx

	/* Load ip into the first parameter */
	movq RIP(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi
	/* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
	movq SS+16(%rsp), %rsi
#else
	movq 8(%rbp), %rsi
#endif
.endm
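
/*
 * With -mfentry the hook is called at the very top of the function,
 * before any frame is set up, so the parent ip has to be fetched from
 * the stack (SS+16 past the frame MCOUNT_SAVE_FRAME built). With
 * classic mcount the caller's frame pointer is already live, so the
 * parent ip sits at 8(%rbp).
 */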

ENTRY(ftrace_caller)
	/* Check if tracing was disabled (quick check) */
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	ftrace_caller_setup
	/* regs go into 4th parameter (but make it NULL) */
	movq $0, %rcx

GLOBAL(ftrace_call)
	call ftrace_stub

	MCOUNT_RESTORE_FRAME
ftrace_return:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
	retq
END(ftrace_caller)
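
/*
 * Note: with CONFIG_DYNAMIC_FTRACE, the "call ftrace_stub" at
 * ftrace_call and the "jmp ftrace_stub" at ftrace_graph_call above are
 * placeholder instructions that ftrace rewrites at runtime to point at
 * the active tracer.
 */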

ENTRY(ftrace_regs_caller)
	/* Save the current flags before compare (in SS location) */
	pushfq

	/* Check if tracing was disabled (quick check) */
	cmpl $0, function_trace_stop
	jne  ftrace_restore_flags

	/* skip=8 to skip flags saved in SS */
	ftrace_caller_setup 8

	/* Save the rest of pt_regs */
	movq %r15, R15(%rsp)
	movq %r14, R14(%rsp)
	movq %r13, R13(%rsp)
	movq %r12, R12(%rsp)
	movq %r11, R11(%rsp)
	movq %r10, R10(%rsp)
	movq %rbp, RBP(%rsp)
	movq %rbx, RBX(%rsp)
	/* Copy saved flags */
	movq SS(%rsp), %rcx
	movq %rcx, EFLAGS(%rsp)
	/* Kernel segments */
	movq $__KERNEL_DS, %rcx
	movq %rcx, SS(%rsp)
	movq $__KERNEL_CS, %rcx
	movq %rcx, CS(%rsp)
	/* Stack - skipping return address */
	leaq SS+16(%rsp), %rcx
	movq %rcx, RSP(%rsp)

	/* regs go into 4th parameter */
	leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	/* Copy flags back to SS, to restore them */
	movq EFLAGS(%rsp), %rax
	movq %rax, SS(%rsp)

	/* Handlers can change the RIP */
	movq RIP(%rsp), %rax
	movq %rax, SS+8(%rsp)

	/* restore the rest of pt_regs */
	movq R15(%rsp), %r15
	movq R14(%rsp), %r14
	movq R13(%rsp), %r13
	movq R12(%rsp), %r12
	movq R10(%rsp), %r10
	movq RBP(%rsp), %rbp
	movq RBX(%rsp), %rbx

	/* skip=8 to skip flags saved in SS */
	MCOUNT_RESTORE_FRAME 8

	/* Restore flags */
	popfq

	jmp ftrace_return
ftrace_restore_flags:
	popfq
	jmp  ftrace_stub

END(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	MCOUNT_SAVE_FRAME

	movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
	movq SS+16(%rsp), %rsi
#else
	movq 8(%rbp), %rsi
#endif
	subq $MCOUNT_INSN_SIZE, %rdi

	call   *ftrace_trace_function

	MCOUNT_RESTORE_FRAME

	jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	MCOUNT_SAVE_FRAME

#ifdef CC_USING_FENTRY
	leaq SS+16(%rsp), %rdi
	movq $0, %rdx	/* No framepointers needed */
#else
	leaq 8(%rbp), %rdi
	movq (%rbp), %rdx
#endif
	movq RIP(%rsp), %rsi
	subq $MCOUNT_INSN_SIZE, %rsi

	call	prepare_ftrace_return

	MCOUNT_RESTORE_FRAME

	retq
END(ftrace_graph_caller)

GLOBAL(return_to_handler)
	subq  $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, %rdi
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $24, %rsp
	jmp *%rdi
#endif


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it adds a breakpoint to
 * every location it is about to modify, syncs the CPUs, updates all the
 * code, syncs the CPUs again, and then removes the breakpoints. During
 * that window, if lockdep is enabled, TRACE_IRQS_ON/OFF might jump back
 * into the debug handler outside of the IST-protected update.
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call debug_stack_set_zero
	TRACE_IRQS_OFF
	call debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call debug_stack_set_zero
	TRACE_IRQS_ON
	call debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * C code is not supposed to know about the undefined top of stack. Every
 * time a C function with a pt_regs argument is called from the SYSCALL
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq PER_CPU_VAR(old_rsp),\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp  /* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm
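
	/*
	 * The fields written above are exactly the ones SYSCALL never
	 * saved: the user RSP (stashed in per-cpu old_rsp on entry), the
	 * user SS/CS, and EFLAGS, which SYSCALL left in R11. RCX held the
	 * user return address but doubles as C arg3, so its slot is
	 * filled with -1, apparently to mark it as not meaningful here.
	 */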

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,PER_CPU_VAR(old_rsp)
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq_cfi $__KERNEL_DS /* ss */
	/*CFI_REL_OFFSET ss,0*/
	pushq_cfi %rax /* rsp */
	CFI_REL_OFFSET rsp,0
	pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
	/*CFI_REL_OFFSET rflags,0*/
	pushq_cfi $__KERNEL_CS /* cs */
	/*CFI_REL_OFFSET cs,0*/
	pushq_cfi \child_rip /* rip */
	CFI_REL_OFFSET rip,0
	pushq_cfi %rax /* orig rax */
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

/*
 * initial frame state for an empty frame (base for the frames below)
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/* save partial stack frame */
	.macro SAVE_ARGS_IRQ
	cld
	/* start from rbp in pt_regs and jump over */
	movq_cfi rdi, (RDI-RBP)
	movq_cfi rsi, (RSI-RBP)
	movq_cfi rdx, (RDX-RBP)
	movq_cfi rcx, (RCX-RBP)
	movq_cfi rax, (RAX-RBP)
	movq_cfi r8, (R8-RBP)
	movq_cfi r9, (R9-RBP)
	movq_cfi r10, (R10-RBP)
	movq_cfi r11, (R11-RBP)

	/* Save rbp so that we can unwind from get_irq_regs() */
	movq_cfi rbp, 0

	/* Save previous stack value */
	movq %rsp, %rsi

	leaq -RBP(%rsp),%rdi	/* arg1 for handler */
	testl $3, CS-RBP(%rsi)
	je 1f
	SWAPGS
	/*
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl PER_CPU_VAR(irq_count)
	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
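	/*
	 * irq_count is initialized to -1, so the first (outermost) entry
	 * increments it to 0 and sets ZF, making the cmovzq above switch
	 * %rsp to the per-cpu IRQ stack; nested entries leave ZF clear
	 * and keep running on the stack they arrived on.
	 */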
	CFI_DEF_CFA_REGISTER	rsi

	/* Store previous stack value */
	pushq %rsi
	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
			0x77 /* DW_OP_breg7 */, 0, \
			0x06 /* DW_OP_deref */, \
			0x08 /* DW_OP_const1u */, SS+8-RBP, \
			0x22 /* DW_OP_plus */
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF
	.endm

/* save complete stack frame */
	.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
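	/*
	 * %ebx tells the paranoid exit path whether to SWAPGS back:
	 * 1 -> we entered already on the kernel GS base, leave it alone;
	 * 0 -> we came in on the user GS base and swapped, so swap back.
	 * After rdmsr, %edx holds the high half of MSR_GS_BASE, which is
	 * negative for kernel-space addresses.
	 */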
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f	/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)
	.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	DEFAULT_FRAME

	LOCK ; btr $TIF_FORK,TI_flags(%r8)

	pushq_cfi $0x0002
	popfq_cfi			# reset kernel eflags

	call schedule_tail		# rdi: 'prev' task parameter

	GET_THREAD_INFO(%rcx)

	RESTORE_REST

	testl $3, CS-ARGOFFSET(%rsp)	# from kernel_thread?
	jz   1f

	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
	jnz  int_ret_from_sys_call

	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
	jmp ret_from_sys_call		# go to the SYSRET fastpath

1:
	subq $REST_SKIP, %rsp	# leave space for volatiles
	CFI_ADJUST_CFA_OFFSET	REST_SKIP
	movq %rbp, %rdi
	call *%rbx
	movl $0, RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.  However, it does mask the flags register for us, so
 * CLD and CLAC are not needed.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't
 *	have one.
 *
 * When userspace can change the frame, always force IRET. That is
 * because IRET deals with non-canonical addresses better; SYSRET has
 * trouble with them due to bugs in both AMD and Intel CPUs.
 */
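
/*
 * For illustration only (not part of the kernel): a minimal userspace
 * sequence that lands here, following the register convention above:
 *
 *	movl	$1, %eax		# __NR_write
 *	movl	$1, %edi		# arg0: fd = stdout
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movl	$len, %edx		# arg2: count
 *	syscall				# rcx <- return RIP, r11 <- rflags
 */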

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(system_call_after_swapgs)

	movq	%rsp,PER_CPU_VAR(old_rsp)
	movq	PER_CPU_VAR(kernel_stack),%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,0
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jnz tracesys
system_call_fastpath:
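	/*
	 * With CONFIG_X86_X32_ABI, __SYSCALL_MASK clears __X32_SYSCALL_BIT
	 * so that x32 calls index the same table; otherwise the mask is ~0
	 * and the number is range-checked as-is.
	 */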
#if __SYSCALL_MASK == ~0
	cmpq $__NR_syscall_max,%rax
#else
	andl $__SYSCALL_MASK,%eax
	cmpl $__NR_syscall_max,%eax
#endif
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)  # XXX:	rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 1,-ARG_SKIP,0
	/*CFI_REGISTER rflags,r11*/
	movq	PER_CPU_VAR(old_rsp), %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/*
	 * We have a signal, or exit tracing or single-step.
	 * These all wind up with the iret return path anyway,
	 * so just join that path right now.
	 */
	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
	jmp int_check_syscall_exit_work

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call __audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call __audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit.  Call __audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq RAX-ARGOFFSET(%rsp),%rsi	/* second arg, syscall return value */
	cmpq $-MAX_ERRNO,%rsi		/* is it < -MAX_ERRNO? */
	setbe %al			/* 1 if so, 0 if not */
	movzbl %al,%edi			/* zero-extend that into %edi */
	call __audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif	/* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
#if __SYSCALL_MASK == ~0
	cmpq $__NR_syscall_max,%rax
#else
	andl $__SYSCALL_MASK,%eax
	cmpl $__NR_syscall_max,%eax
#endif
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz   int_careful
	andl    $~TS_COMPAT,TI_status(%rcx)
	jmp   retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc  int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
int_check_syscall_exit_work:
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq_cfi %rdi
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq_cfi %rdi
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

	.macro FORK_LIKE func
ENTRY(stub_\func)
	CFI_STARTPROC
	popq	%r11			/* save return address */
	PARTIAL_FRAME 0
	SAVE_REST
	pushq	%r11			/* put it back on stack */
	FIXUP_TOP_OF_STACK %r11, 8
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	call sys_\func
	RESTORE_TOP_OF_STACK %r11, 8
	ret $REST_SKIP			/* pop extended registers */
	CFI_ENDPROC
END(stub_\func)
	.endm
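
/*
 * Note on "ret $REST_SKIP": "ret imm16" pops the return address and
 * then adds the immediate to %rsp, so the callee-saved slots pushed by
 * SAVE_REST are discarded in the same instruction.
 */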

	.macro FIXED_FRAME label,func
ENTRY(\label)
	CFI_STARTPROC
	PARTIAL_FRAME 0 8		/* offset 8: return address */
	FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
	call \func
	RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
	ret
	CFI_ENDPROC
END(\label)
	.endm

	FORK_LIKE  clone
	FORK_LIKE  fork
	FORK_LIKE  vfork
	FIXED_FRAME stub_iopl, sys_iopl

ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_execve
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call sys32_x32_rt_sigreturn
	movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
	CFI_STARTPROC
	addq $8, %rsp
	PARTIAL_FRAME 0
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call compat_sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_execve)

#endif

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
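/*
 * The arithmetic, roughly: "pushq $imm8" is 2 bytes and a short
 * "jmp 2f" is 2 bytes, so six 4-byte stubs plus a final 2-byte push
 * and the shared 5-byte near jmp to common_interrupt come to 31 bytes
 * per 32-byte chunk.
 */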
	.section .init.rodata,"a"
ENTRY(interrupt)
	.section .entry.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	/* reserve pt_regs for scratch regs and rbp */
	subq $ORIG_RAX-RBP, %rsp
	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
	SAVE_ARGS_IRQ
	call \func
	.endm

/*
 * Interrupt entry/exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	ASM_CLAC
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq %rsi
	CFI_DEF_CFA rsi,SS+8-RBP	/* reg/off reset after def_cfa_expr */
	leaq ARGOFFSET-RBP(%rsi), %rsp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	RBP-ARGOFFSET

exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 1,8,1

irq_return:
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
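	/* Bit 2 of a segment selector is the TI bit: set = LDT selector */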
	testb $4,(SS-RIP)(%rsp)
	jnz irq_return_ldt
#endif

irq_return_iret:
	INTERRUPT_RETURN
	_ASM_EXTABLE(irq_return_iret, bad_iret)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq
	_ASM_EXTABLE(native_iret, bad_iret)
#endif

#ifdef CONFIG_X86_ESPFIX64
irq_return_ldt:
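	/*
	 * Copy the hardware iret frame to the per-cpu espfix area and
	 * return through an alias of it whose upper %rsp bits are safe
	 * to expose, since an iret to a 16-bit SS restores only the low
	 * 16 bits of %rsp and would otherwise leak kernel stack address
	 * bits to userspace.
	 */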
1065 pushq_cfi %rax
1066 pushq_cfi %rdi
1067 SWAPGS
1068 movq PER_CPU_VAR(espfix_waddr),%rdi
1069 movq %rax,(0*8)(%rdi) /* RAX */
1070 movq (2*8)(%rsp),%rax /* RIP */
1071 movq %rax,(1*8)(%rdi)
1072 movq (3*8)(%rsp),%rax /* CS */
1073 movq %rax,(2*8)(%rdi)
1074 movq (4*8)(%rsp),%rax /* RFLAGS */
1075 movq %rax,(3*8)(%rdi)
1076 movq (6*8)(%rsp),%rax /* SS */
1077 movq %rax,(5*8)(%rdi)
1078 movq (5*8)(%rsp),%rax /* RSP */
1079 movq %rax,(4*8)(%rdi)
1080 andl $0xffff0000,%eax
1081 popq_cfi %rdi
1082 orq PER_CPU_VAR(espfix_stack),%rax
1083 SWAPGS
1084 movq %rax,%rsp
1085 popq_cfi %rax
1086 jmp irq_return_iret
H. Peter Anvin34273f42014-05-04 10:36:22 -07001087#endif
H. Peter Anvin3891a042014-04-29 16:46:09 -07001088
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 .section .fixup,"ax"
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090bad_iret:
Roland McGrath3aa4b372008-02-06 22:39:43 +01001091 /*
1092 * The iret traps when the %cs or %ss being restored is bogus.
1093 * We've lost the original trap vector and error code.
1094 * #GPF is the most likely one to get for an invalid selector.
1095 * So pretend we completed the iret and took the #GPF in user mode.
1096 *
1097 * We are now running with the kernel GS after exception recovery.
1098 * But error_entry expects us to have user GS to match the user %cs,
1099 * so swap back.
1100 */
1101 pushq $0
1102
1103 SWAPGS
1104 jmp general_protection
1105
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001106 .previous
1107
Jan Beulich7effaa82005-09-12 18:49:24 +02001108 /* edi: workmask, edx: work */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109retint_careful:
Jan Beulich7effaa82005-09-12 18:49:24 +02001110 CFI_RESTORE_STATE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 bt $TIF_NEED_RESCHED,%edx
1112 jnc retint_signal
Ingo Molnar2601e642006-07-03 00:24:45 -07001113 TRACE_IRQS_ON
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001114 ENABLE_INTERRUPTS(CLBR_NONE)
Jan Beulichdf5d1872010-09-02 14:07:16 +01001115 pushq_cfi %rdi
Frederic Weisbecker04304992012-07-11 20:26:38 +02001116 SCHEDULE_USER
Jan Beulichdf5d1872010-09-02 14:07:16 +01001117 popq_cfi %rdi
Linus Torvalds1da177e2005-04-16 15:20:36 -07001118 GET_THREAD_INFO(%rcx)
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001119 DISABLE_INTERRUPTS(CLBR_NONE)
Ingo Molnar2601e642006-07-03 00:24:45 -07001120 TRACE_IRQS_OFF
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 jmp retint_check
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001122
Linus Torvalds1da177e2005-04-16 15:20:36 -07001123retint_signal:
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001124 testl $_TIF_DO_NOTIFY_MASK,%edx
Andi Kleen10ffdbb2005-05-16 21:53:19 -07001125 jz retint_swapgs
Ingo Molnar2601e642006-07-03 00:24:45 -07001126 TRACE_IRQS_ON
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001127 ENABLE_INTERRUPTS(CLBR_NONE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 SAVE_REST
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001129 movq $-1,ORIG_RAX(%rsp)
Andi Kleen3829ee62005-07-28 21:15:48 -07001130 xorl %esi,%esi # oldset
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 movq %rsp,%rdi # &pt_regs
1132 call do_notify_resume
1133 RESTORE_REST
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001134 DISABLE_INTERRUPTS(CLBR_NONE)
Ingo Molnar2601e642006-07-03 00:24:45 -07001135 TRACE_IRQS_OFF
Andi Kleenbe9e6872005-05-01 08:58:51 -07001136 GET_THREAD_INFO(%rcx)
Roland McGratheca91e72008-07-10 14:50:39 -07001137 jmp retint_with_reschedule
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138
1139#ifdef CONFIG_PREEMPT
1140 /* Returning to kernel space. Check if we need preemption */
1141 /* rcx: threadinfo. interrupts off. */
Andi Kleenb06baba2006-09-26 10:52:29 +02001142ENTRY(retint_kernel)
Peter Zijlstrac2daa3b2013-08-14 14:51:00 +02001143 cmpl $0,PER_CPU_VAR(__preempt_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144 jnz retint_restore_args
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
1146 jnc retint_restore_args
1147 call preempt_schedule_irq
1148 jmp exit_intr
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001149#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 CFI_ENDPROC
Jan Beulich4b787e02006-06-26 13:56:55 +02001151END(common_interrupt)
H. Peter Anvin3891a042014-04-29 16:46:09 -07001152
1153 /*
1154 * If IRET takes a fault on the espfix stack, then we
1155 * end up promoting it to a doublefault. In that case,
1156 * modify the stack to make it look like we just entered
1157 * the #GP handler from user space, similar to bad_iret.
1158 */
H. Peter Anvin34273f42014-05-04 10:36:22 -07001159#ifdef CONFIG_X86_ESPFIX64
H. Peter Anvin3891a042014-04-29 16:46:09 -07001160 ALIGN
1161__do_double_fault:
1162 XCPT_FRAME 1 RDI+8
1163 movq RSP(%rdi),%rax /* Trap on the espfix stack? */
1164 sarq $PGDIR_SHIFT,%rax
1165 cmpl $ESPFIX_PGD_ENTRY,%eax
1166 jne do_double_fault /* No, just deliver the fault */
1167 cmpl $__KERNEL_CS,CS(%rdi)
1168 jne do_double_fault
1169 movq RIP(%rdi),%rax
1170 cmpq $irq_return_iret,%rax
1171#ifdef CONFIG_PARAVIRT
1172 je 1f
1173 cmpq $native_iret,%rax
1174#endif
1175 jne do_double_fault /* This shouldn't happen... */
11761:
1177 movq PER_CPU_VAR(kernel_stack),%rax
1178 subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
1179 movq %rax,RSP(%rdi)
1180 movq $0,(%rax) /* Missing (lost) #GP error code */
1181 movq $general_protection,RIP(%rdi)
1182 retq
1183 CFI_ENDPROC
1184END(__do_double_fault)
H. Peter Anvin34273f42014-05-04 10:36:22 -07001185#else
1186# define __do_double_fault do_double_fault
1187#endif
H. Peter Anvin3891a042014-04-29 16:46:09 -07001188
Masami Hiramatsu8222d712009-08-27 13:23:25 -04001189/*
1190 * End of kprobes section
1191 */
1192 .popsection
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001193
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194/*
1195 * APIC interrupts.
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001196 */
Seiji Aguchicf910e82013-06-20 11:46:53 -04001197.macro apicinterrupt3 num sym do_sym
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001198ENTRY(\sym)
Jan Beulich7effaa82005-09-12 18:49:24 +02001199 INTR_FRAME
Jan Beulichee4eb872012-11-02 11:18:39 +00001200 ASM_CLAC
Jan Beulichdf5d1872010-09-02 14:07:16 +01001201 pushq_cfi $~(\num)
Jan Beulich39e95432011-11-29 11:03:46 +00001202.Lcommon_\sym:
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001203 interrupt \do_sym
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 jmp ret_from_intr
1205 CFI_ENDPROC
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001206END(\sym)
1207.endm
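/*
 * A sketch of one expansion, with a made-up vector number and handler
 * names (illustrative only, not definitions from this file):
 *
 *	apicinterrupt3 0xf2 example_interrupt smp_example_interrupt
 *
 * defines ENTRY(example_interrupt), which pushes $~(0xf2) (the vector in
 * one's complement), runs the common 'interrupt' path with
 * smp_example_interrupt as the C handler, and leaves via ret_from_intr.
 */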
Jacob Shin89b831e2005-11-05 17:25:53 +01001208
Seiji Aguchicf910e82013-06-20 11:46:53 -04001209#ifdef CONFIG_TRACING
1210#define trace(sym) trace_##sym
1211#define smp_trace(sym) smp_trace_##sym
1212
1213.macro trace_apicinterrupt num sym
1214apicinterrupt3 \num trace(\sym) smp_trace(\sym)
1215.endm
1216#else
1217.macro trace_apicinterrupt num sym do_sym
1218.endm
1219#endif
1220
1221.macro apicinterrupt num sym do_sym
1222apicinterrupt3 \num \sym \do_sym
1223trace_apicinterrupt \num \sym
1224.endm
1225
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001226#ifdef CONFIG_SMP
Seiji Aguchicf910e82013-06-20 11:46:53 -04001227apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001228 irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
Seiji Aguchicf910e82013-06-20 11:46:53 -04001229apicinterrupt3 REBOOT_VECTOR \
Andi Kleen4ef702c2009-05-27 21:56:52 +02001230 reboot_interrupt smp_reboot_interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231#endif
1232
Nick Piggin03b48632009-01-20 04:36:04 +01001233#ifdef CONFIG_X86_UV
Seiji Aguchicf910e82013-06-20 11:46:53 -04001234apicinterrupt3 UV_BAU_MESSAGE \
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001235 uv_bau_message_intr1 uv_bau_message_interrupt
Nick Piggin03b48632009-01-20 04:36:04 +01001236#endif
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001237apicinterrupt LOCAL_TIMER_VECTOR \
1238 apic_timer_interrupt smp_apic_timer_interrupt
Dimitri Sivanich4a4de9c2009-10-14 09:22:57 -05001239apicinterrupt X86_PLATFORM_IPI_VECTOR \
1240 x86_platform_ipi smp_x86_platform_ipi
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
Yang Zhangd78f2662013-04-11 19:25:11 +08001242#ifdef CONFIG_HAVE_KVM
Seiji Aguchicf910e82013-06-20 11:46:53 -04001243apicinterrupt3 POSTED_INTR_VECTOR \
Yang Zhangd78f2662013-04-11 19:25:11 +08001244 kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
1245#endif
1246
Seiji Aguchi33e5ff62013-06-22 07:33:30 -04001247#ifdef CONFIG_X86_MCE_THRESHOLD
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001248apicinterrupt THRESHOLD_APIC_VECTOR \
Andi Kleen7856f6c2009-04-28 23:32:56 +02001249 threshold_interrupt smp_threshold_interrupt
Seiji Aguchi33e5ff62013-06-22 07:33:30 -04001250#endif
1251
1252#ifdef CONFIG_X86_THERMAL_VECTOR
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001253apicinterrupt THERMAL_APIC_VECTOR \
1254 thermal_interrupt smp_thermal_interrupt
Seiji Aguchi33e5ff62013-06-22 07:33:30 -04001255#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001257#ifdef CONFIG_SMP
1258apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
1259 call_function_single_interrupt smp_call_function_single_interrupt
1260apicinterrupt CALL_FUNCTION_VECTOR \
1261 call_function_interrupt smp_call_function_interrupt
1262apicinterrupt RESCHEDULE_VECTOR \
1263 reschedule_interrupt smp_reschedule_interrupt
1264#endif
1265
1266apicinterrupt ERROR_APIC_VECTOR \
1267 error_interrupt smp_error_interrupt
1268apicinterrupt SPURIOUS_APIC_VECTOR \
1269 spurious_interrupt smp_spurious_interrupt
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001270
Peter Zijlstrae360adb2010-10-14 14:01:34 +08001271#ifdef CONFIG_IRQ_WORK
1272apicinterrupt IRQ_WORK_VECTOR \
1273 irq_work_interrupt smp_irq_work_interrupt
Ingo Molnar241771e2008-12-03 10:39:53 +01001274#endif
1275
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276/*
1277 * Exception entry points.
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001278 */
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001279.macro zeroentry sym do_sym
1280ENTRY(\sym)
Jan Beulich7effaa82005-09-12 18:49:24 +02001281 INTR_FRAME
Jan Beulichee4eb872012-11-02 11:18:39 +00001282 ASM_CLAC
Jeremy Fitzhardingefab58422008-06-25 00:19:31 -04001283 PARAVIRT_ADJUST_EXCEPTION_FRAME
Ingo Molnar14ae22b2008-11-21 15:20:47 +01001284 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
Jan Beulichb1cccb12010-09-02 13:55:11 +01001285 subq $ORIG_RAX-R15, %rsp
1286 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
Alexander van Heukelumd99015b2008-11-19 01:18:11 +01001287 call error_entry
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001288 DEFAULT_FRAME 0
Alexander van Heukelumd99015b2008-11-19 01:18:11 +01001289 movq %rsp,%rdi /* pt_regs pointer */
1290 xorl %esi,%esi /* no error code */
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001291 call \do_sym
Alexander van Heukelumd99015b2008-11-19 01:18:11 +01001292 jmp error_exit /* %ebx: no swapgs flag */
Jan Beulich7effaa82005-09-12 18:49:24 +02001293 CFI_ENDPROC
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001294END(\sym)
1295.endm
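/*
 * The \do_sym handlers are assumed to follow the usual C trap signature
 * declared on the C side (sketch only, e.g. for the divide_error entry
 * further down):
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code);
 *
 * zeroentry hands over regs in %rdi and a zero error code in %rsi;
 * errorentry below forwards the hardware error code instead.
 */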
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001297.macro paranoidzeroentry sym do_sym
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001298ENTRY(\sym)
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001299 INTR_FRAME
Jan Beulichee4eb872012-11-02 11:18:39 +00001300 ASM_CLAC
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001301 PARAVIRT_ADJUST_EXCEPTION_FRAME
Jan Beulichb1cccb12010-09-02 13:55:11 +01001302 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
1303 subq $ORIG_RAX-R15, %rsp
1304 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001305 call save_paranoid
1306 TRACE_IRQS_OFF
1307 movq %rsp,%rdi /* pt_regs pointer */
1308 xorl %esi,%esi /* no error code */
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001309 call \do_sym
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001310 jmp paranoid_exit /* %ebx: no swapgs flag */
1311 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001312END(\sym)
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001313.endm
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001314
Brian Gerstc15a5952010-07-31 12:48:22 -04001315#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001316.macro paranoidzeroentry_ist sym do_sym ist
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001317ENTRY(\sym)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001318 INTR_FRAME
Jan Beulichee4eb872012-11-02 11:18:39 +00001319 ASM_CLAC
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001320 PARAVIRT_ADJUST_EXCEPTION_FRAME
Jan Beulichb1cccb12010-09-02 13:55:11 +01001321 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
1322 subq $ORIG_RAX-R15, %rsp
1323 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001324 call save_paranoid
Steven Rostedt5963e312012-05-30 11:54:53 -04001325 TRACE_IRQS_OFF_DEBUG
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001326 movq %rsp,%rdi /* pt_regs pointer */
1327 xorl %esi,%esi /* no error code */
Brian Gerstc15a5952010-07-31 12:48:22 -04001328 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001329 call \do_sym
Brian Gerstc15a5952010-07-31 12:48:22 -04001330 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001331 jmp paranoid_exit /* %ebx: no swapgs flag */
1332 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001333END(\sym)
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001334.endm
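/*
 * Design note on the subq/addq pair around \do_sym above: IST slot \ist
 * in the TSS is temporarily lowered by EXCEPTION_STKSZ while the handler
 * runs, so a recursive exception on the same vector (say, a breakpoint
 * inside do_debug) gets a fresh stack instead of clobbering the frame
 * still in use.
 */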
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001335
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001336.macro errorentry sym do_sym
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001337ENTRY(\sym)
Jan Beulich7effaa82005-09-12 18:49:24 +02001338 XCPT_FRAME
Jan Beulichee4eb872012-11-02 11:18:39 +00001339 ASM_CLAC
Jeremy Fitzhardingefab58422008-06-25 00:19:31 -04001340 PARAVIRT_ADJUST_EXCEPTION_FRAME
Jan Beulichb1cccb12010-09-02 13:55:11 +01001341 subq $ORIG_RAX-R15, %rsp
1342 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
Alexander van Heukelumd99015b2008-11-19 01:18:11 +01001343 call error_entry
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001344 DEFAULT_FRAME 0
Alexander van Heukelumd99015b2008-11-19 01:18:11 +01001345 movq %rsp,%rdi /* pt_regs pointer */
1346 movq ORIG_RAX(%rsp),%rsi /* get error code */
1347 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001348 call \do_sym
Alexander van Heukelumd99015b2008-11-19 01:18:11 +01001349 jmp error_exit /* %ebx: no swapgs flag */
Jan Beulich7effaa82005-09-12 18:49:24 +02001350 CFI_ENDPROC
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001351END(\sym)
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001352.endm
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
Seiji Aguchi25c74b12013-10-30 16:37:00 -04001354#ifdef CONFIG_TRACING
1355.macro trace_errorentry sym do_sym
1356errorentry trace(\sym) trace(\do_sym)
1357errorentry \sym \do_sym
1358.endm
1359#else
1360.macro trace_errorentry sym do_sym
1361errorentry \sym \do_sym
1362.endm
1363#endif
1364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 /* error code is on the stack already */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001366.macro paranoiderrorentry sym do_sym
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001367ENTRY(\sym)
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001368 XCPT_FRAME
Jan Beulichee4eb872012-11-02 11:18:39 +00001369 ASM_CLAC
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001370 PARAVIRT_ADJUST_EXCEPTION_FRAME
Jan Beulichb1cccb12010-09-02 13:55:11 +01001371 subq $ORIG_RAX-R15, %rsp
1372 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
Alexander van Heukelume2f6bc22008-11-21 16:43:18 +01001373 call save_paranoid
1374 DEFAULT_FRAME 0
Alexander van Heukelum7e61a792008-09-26 14:03:03 +02001375 TRACE_IRQS_OFF
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001376 movq %rsp,%rdi /* pt_regs pointer */
1377 movq ORIG_RAX(%rsp),%rsi /* get error code */
1378 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001379 call \do_sym
Alexander van Heukelumb8b1d082008-11-21 16:44:28 +01001380 jmp paranoid_exit /* %ebx: no swapgs flag */
1381 CFI_ENDPROC
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001382END(\sym)
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001383.endm
1384
1385zeroentry divide_error do_divide_error
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001386zeroentry overflow do_overflow
1387zeroentry bounds do_bounds
1388zeroentry invalid_op do_invalid_op
1389zeroentry device_not_available do_device_not_available
H. Peter Anvin3891a042014-04-29 16:46:09 -07001390paranoiderrorentry double_fault __do_double_fault
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001391zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
1392errorentry invalid_TSS do_invalid_TSS
1393errorentry segment_not_present do_segment_not_present
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001394zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
1395zeroentry coprocessor_error do_coprocessor_error
1396errorentry alignment_check do_alignment_check
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001397zeroentry simd_coprocessor_error do_simd_coprocessor_error
Andy Lutomirski5cec93c2011-06-05 13:50:24 -04001398
Ingo Molnar2601e642006-07-03 00:24:45 -07001399
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001400 /* Reload gs selector with exception handling */
1401 /* edi: new selector */
Jeremy Fitzhardinge9f9d4892008-06-25 00:19:32 -04001402ENTRY(native_load_gs_index)
Jan Beulich7effaa82005-09-12 18:49:24 +02001403 CFI_STARTPROC
Jan Beulichdf5d1872010-09-02 14:07:16 +01001404 pushfq_cfi
Jeremy Fitzhardingeb8aa2872009-01-28 14:35:03 -08001405 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001406 SWAPGS
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001407gs_change:
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001408 movl %edi,%gs
Linus Torvalds1da177e2005-04-16 15:20:36 -070014092: mfence /* workaround */
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001410 SWAPGS
Jan Beulichdf5d1872010-09-02 14:07:16 +01001411 popfq_cfi
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001412 ret
Jan Beulich7effaa82005-09-12 18:49:24 +02001413 CFI_ENDPROC
Alexander van Heukelum6efdcfa2008-11-23 10:15:32 +01001414END(native_load_gs_index)
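/*
 * Minimal sketch of the C-side usage, assuming the kernel's
 * load_gs_index() wrapper (illustrative, not defined here):
 *
 *	load_gs_index(sel);	# reaches native_load_gs_index natively
 *
 * A selector that faults at gs_change is caught by the _ASM_EXTABLE
 * fixup below, which loads a null %gs instead of oopsing.
 */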
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001415
H. Peter Anvind7abc0f2012-04-20 12:19:50 -07001416 _ASM_EXTABLE(gs_change,bad_gs)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001417 .section .fixup,"ax"
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 /* running with kernelgs */
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001419bad_gs:
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001420 SWAPGS /* switch back to user gs */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 xorl %eax,%eax
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001422 movl %eax,%gs
1423 jmp 2b
1424 .previous
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001425
Andi Kleen26995002006-08-02 22:37:28 +02001426/* Call __do_softirq on the per-cpu irq stack. Interrupts are off. */
Frederic Weisbecker7d65f4a2013-09-05 15:49:45 +02001427ENTRY(do_softirq_own_stack)
Jan Beulich7effaa82005-09-12 18:49:24 +02001428 CFI_STARTPROC
Jan Beulichdf5d1872010-09-02 14:07:16 +01001429 pushq_cfi %rbp
Andi Kleen26995002006-08-02 22:37:28 +02001430 CFI_REL_OFFSET rbp,0
1431 mov %rsp,%rbp
1432 CFI_DEF_CFA_REGISTER rbp
Brian Gerst56895532009-01-19 00:38:58 +09001433 incl PER_CPU_VAR(irq_count)
Brian Gerst26f80bd2009-01-19 00:38:58 +09001434 cmove PER_CPU_VAR(irq_stack_ptr),%rsp
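	/*
	 * Assumes the per-cpu irq_count starts at -1 when off the irq
	 * stack: the incl above sets ZF only on the outermost entry, so
	 * the cmove switches %rsp to the irq stack exactly once; nested
	 * invocations stay on the stack they are already on.
	 */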
Andi Kleen26995002006-08-02 22:37:28 +02001435 push %rbp # backlink for old unwinder
Andi Kleened6b6762005-07-28 21:15:49 -07001436 call __do_softirq
Andi Kleen26995002006-08-02 22:37:28 +02001437 leaveq
Jan Beulichdf5d1872010-09-02 14:07:16 +01001438 CFI_RESTORE rbp
Jan Beulich7effaa82005-09-12 18:49:24 +02001439 CFI_DEF_CFA_REGISTER rsp
Andi Kleen26995002006-08-02 22:37:28 +02001440 CFI_ADJUST_CFA_OFFSET -8
Brian Gerst56895532009-01-19 00:38:58 +09001441 decl PER_CPU_VAR(irq_count)
Andi Kleened6b6762005-07-28 21:15:49 -07001442 ret
Jan Beulich7effaa82005-09-12 18:49:24 +02001443 CFI_ENDPROC
Frederic Weisbecker7d65f4a2013-09-05 15:49:45 +02001444END(do_softirq_own_stack)
Andi Kleen75154f42007-06-23 02:29:25 +02001445
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001446#ifdef CONFIG_XEN
Alexander van Heukelum322648d2008-11-23 10:08:28 +01001447zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001448
1449/*
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001450 * A note on the "critical region" in our callback handler.
1451 * We want to avoid stacking callback handlers due to events occurring
1452 * during handling of the last event. To do this, we keep events disabled
1453 * until we've done all processing. HOWEVER, we must enable events before
1454 * popping the stack frame (can't be done atomically) and so it would still
1455 * be possible to get enough handler activations to overflow the stack.
1456 * Although unlikely, bugs of that kind are hard to track down, so we'd
1457 * like to avoid the possibility.
1458 * So, on entry to the handler we detect whether we interrupted an
1459 * existing activation in its critical region -- if so, we pop the current
1460 * activation and restart the handler using the previous one.
1461 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001462ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
1463 CFI_STARTPROC
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001464/*
1465 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1466 * see the correct pointer to the pt_regs
1467 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001468 movq %rdi, %rsp # we don't return, adjust the stack frame
1469 CFI_ENDPROC
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001470 DEFAULT_FRAME
Brian Gerst56895532009-01-19 00:38:58 +0900147111: incl PER_CPU_VAR(irq_count)
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001472 movq %rsp,%rbp
1473 CFI_DEF_CFA_REGISTER rbp
Brian Gerst26f80bd2009-01-19 00:38:58 +09001474 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001475 pushq %rbp # backlink for old unwinder
1476 call xen_evtchn_do_upcall
1477 popq %rsp
1478 CFI_DEF_CFA_REGISTER rsp
Brian Gerst56895532009-01-19 00:38:58 +09001479 decl PER_CPU_VAR(irq_count)
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001480 jmp error_exit
1481 CFI_ENDPROC
Alexander van Heukelum371c3942011-03-11 21:59:38 +01001482END(xen_do_hypervisor_callback)
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001483
1484/*
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001485 * Hypervisor uses this for application faults while it executes.
1486 * We get here for two reasons:
1487 * 1. Fault while reloading DS, ES, FS or GS
1488 * 2. Fault while executing IRET
1489 * Category 1 we do not need to fix up as Xen has already reloaded all segment
1490 * registers that could be reloaded and zeroed the others.
1491 * Category 2 we fix up by killing the current process. We cannot use the
1492 * normal Linux return path in this case because if we use the IRET hypercall
1493 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1494 * We distinguish between categories by comparing each saved segment register
1495 * with its current contents: any discrepancy means we are in category 1.
1496 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001497ENTRY(xen_failsafe_callback)
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001498 INTR_FRAME 1 (6*8)
1499 /*CFI_REL_OFFSET gs,GS*/
1500 /*CFI_REL_OFFSET fs,FS*/
1501 /*CFI_REL_OFFSET es,ES*/
1502 /*CFI_REL_OFFSET ds,DS*/
1503 CFI_REL_OFFSET r11,8
1504 CFI_REL_OFFSET rcx,0
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001505 movw %ds,%cx
1506 cmpw %cx,0x10(%rsp)
1507 CFI_REMEMBER_STATE
1508 jne 1f
1509 movw %es,%cx
1510 cmpw %cx,0x18(%rsp)
1511 jne 1f
1512 movw %fs,%cx
1513 cmpw %cx,0x20(%rsp)
1514 jne 1f
1515 movw %gs,%cx
1516 cmpw %cx,0x28(%rsp)
1517 jne 1f
1518 /* All segments match their saved values => Category 2 (Bad IRET). */
1519 movq (%rsp),%rcx
1520 CFI_RESTORE rcx
1521 movq 8(%rsp),%r11
1522 CFI_RESTORE r11
1523 addq $0x30,%rsp
1524 CFI_ADJUST_CFA_OFFSET -0x30
Ingo Molnar14ae22b2008-11-21 15:20:47 +01001525 pushq_cfi $0 /* RIP */
1526 pushq_cfi %r11
1527 pushq_cfi %rcx
Jeremy Fitzhardinge4a5c3e72008-07-08 15:07:09 -07001528 jmp general_protection
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001529 CFI_RESTORE_STATE
15301: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1531 movq (%rsp),%rcx
1532 CFI_RESTORE rcx
1533 movq 8(%rsp),%r11
1534 CFI_RESTORE r11
1535 addq $0x30,%rsp
1536 CFI_ADJUST_CFA_OFFSET -0x30
David Vrabela349e23d12012-10-19 17:29:07 +01001537 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001538 SAVE_ALL
1539 jmp error_exit
1540 CFI_ENDPROC
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001541END(xen_failsafe_callback)
1542
Seiji Aguchicf910e82013-06-20 11:46:53 -04001543apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
Sheng Yang38e20b02010-05-14 12:40:51 +01001544 xen_hvm_callback_vector xen_evtchn_do_upcall
1545
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001546#endif /* CONFIG_XEN */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001547
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001548#if IS_ENABLED(CONFIG_HYPERV)
Seiji Aguchicf910e82013-06-20 11:46:53 -04001549apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001550 hyperv_callback_vector hyperv_vector_handler
1551#endif /* CONFIG_HYPERV */
1552
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001553/*
1554 * Some functions should be protected against kprobes
1555 */
1556 .pushsection .kprobes.text, "ax"
1557
1558paranoidzeroentry_ist debug do_debug DEBUG_STACK
1559paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
1560paranoiderrorentry stack_segment do_stack_segment
Jeremy Fitzhardinge6cac5a92009-03-29 19:56:29 -07001561#ifdef CONFIG_XEN
1562zeroentry xen_debug do_debug
1563zeroentry xen_int3 do_int3
1564errorentry xen_stack_segment do_stack_segment
1565#endif
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001566errorentry general_protection do_general_protection
Seiji Aguchi25c74b12013-10-30 16:37:00 -04001567trace_errorentry page_fault do_page_fault
Gleb Natapov631bc482010-10-14 11:22:52 +02001568#ifdef CONFIG_KVM_GUEST
1569errorentry async_page_fault do_async_page_fault
1570#endif
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001571#ifdef CONFIG_X86_MCE
Andi Kleen5d727922009-04-27 19:25:48 +02001572paranoidzeroentry machine_check *machine_check_vector(%rip)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001573#endif
1574
1575 /*
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001576 * "Paranoid" exit path from exception stack.
1577 * Paranoid because this is used by NMIs and cannot take
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001578 * any kernel state for granted.
1579 * We don't do kernel preemption checks here, because the only
1580 * common user of this path is NMI, which does not enable IRQs and
1581 * cannot get reschedule ticks.
1582 *
1583 * "trace" is 0 for the NMI handler only, because irq-tracing
1584 * is fundamentally NMI-unsafe. (we cannot change the soft and
1585 * hard flags at once, atomically)
1586 */
1587
1588 /* ebx: no swapgs flag */
1589ENTRY(paranoid_exit)
Jan Beulich1f130a72010-09-02 13:54:32 +01001590 DEFAULT_FRAME
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001591 DISABLE_INTERRUPTS(CLBR_NONE)
Steven Rostedt5963e312012-05-30 11:54:53 -04001592 TRACE_IRQS_OFF_DEBUG
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001593 testl %ebx,%ebx /* swapgs needed? */
1594 jnz paranoid_restore
1595 testl $3,CS(%rsp)
1596 jnz paranoid_userspace
1597paranoid_swapgs:
1598 TRACE_IRQS_IRETQ 0
1599 SWAPGS_UNSAFE_STACK
Steven Rostedt0300e7f12009-04-17 08:33:52 -04001600 RESTORE_ALL 8
1601 jmp irq_return
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001602paranoid_restore:
Steven Rostedt5963e312012-05-30 11:54:53 -04001603 TRACE_IRQS_IRETQ_DEBUG 0
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001604 RESTORE_ALL 8
1605 jmp irq_return
1606paranoid_userspace:
1607 GET_THREAD_INFO(%rcx)
1608 movl TI_flags(%rcx),%ebx
1609 andl $_TIF_WORK_MASK,%ebx
1610 jz paranoid_swapgs
1611 movq %rsp,%rdi /* &pt_regs */
1612 call sync_regs
1613 movq %rax,%rsp /* switch stack for scheduling */
1614 testl $_TIF_NEED_RESCHED,%ebx
1615 jnz paranoid_schedule
1616 movl %ebx,%edx /* arg3: thread flags */
1617 TRACE_IRQS_ON
1618 ENABLE_INTERRUPTS(CLBR_NONE)
1619 xorl %esi,%esi /* arg2: oldset */
1620 movq %rsp,%rdi /* arg1: &pt_regs */
1621 call do_notify_resume
1622 DISABLE_INTERRUPTS(CLBR_NONE)
1623 TRACE_IRQS_OFF
1624 jmp paranoid_userspace
1625paranoid_schedule:
1626 TRACE_IRQS_ON
1627 ENABLE_INTERRUPTS(CLBR_ANY)
Frederic Weisbecker04304992012-07-11 20:26:38 +02001628 SCHEDULE_USER
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001629 DISABLE_INTERRUPTS(CLBR_ANY)
1630 TRACE_IRQS_OFF
1631 jmp paranoid_userspace
1632 CFI_ENDPROC
1633END(paranoid_exit)
1634
1635/*
1636 * Exception entry point. This expects an error code/orig_rax on the stack.
1637 * Returns the "no swapgs" flag in %ebx (1: swapgs not needed, 0: needed).
1638 */
1639ENTRY(error_entry)
1640 XCPT_FRAME
1641 CFI_ADJUST_CFA_OFFSET 15*8
1642 /* oldrax contains error code */
1643 cld
1644 movq_cfi rdi, RDI+8
1645 movq_cfi rsi, RSI+8
1646 movq_cfi rdx, RDX+8
1647 movq_cfi rcx, RCX+8
1648 movq_cfi rax, RAX+8
1649 movq_cfi r8, R8+8
1650 movq_cfi r9, R9+8
1651 movq_cfi r10, R10+8
1652 movq_cfi r11, R11+8
1653 movq_cfi rbx, RBX+8
1654 movq_cfi rbp, RBP+8
1655 movq_cfi r12, R12+8
1656 movq_cfi r13, R13+8
1657 movq_cfi r14, R14+8
1658 movq_cfi r15, R15+8
1659 xorl %ebx,%ebx
1660 testl $3,CS+8(%rsp)
1661 je error_kernelspace
1662error_swapgs:
1663 SWAPGS
1664error_sti:
1665 TRACE_IRQS_OFF
1666 ret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001667
1668/*
1669 * There are two places in the kernel that can potentially fault with
1670 * usergs. Handle them here. The exception handlers after iret run with
1671 * kernel gs again, so don't set the user space flag. B stepping K8s
1672 * sometimes report a truncated RIP for IRET exceptions returning to
1673 * compat mode. Check for these here too.
1674 */
1675error_kernelspace:
1676 incl %ebx
H. Peter Anvin3891a042014-04-29 16:46:09 -07001677 leaq irq_return_iret(%rip),%rcx
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001678 cmpq %rcx,RIP+8(%rsp)
1679 je error_swapgs
Brian Gerstae24ffe2009-10-12 10:18:23 -04001680 movl %ecx,%eax /* zero extend */
1681 cmpq %rax,RIP+8(%rsp)
1682 je bstep_iret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001683 cmpq $gs_change,RIP+8(%rsp)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001684 je error_swapgs
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001685 jmp error_sti
Brian Gerstae24ffe2009-10-12 10:18:23 -04001686
1687bstep_iret:
1688 /* Fix truncated RIP */
1689 movq %rcx,RIP+8(%rsp)
Brian Gerst97829de2009-11-03 14:02:05 -05001690 jmp error_swapgs
Jan Beuliche6b04b62010-09-02 13:52:45 +01001691 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001692END(error_entry)
1693
1694
1695/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1696ENTRY(error_exit)
1697 DEFAULT_FRAME
1698 movl %ebx,%eax
1699 RESTORE_REST
1700 DISABLE_INTERRUPTS(CLBR_NONE)
1701 TRACE_IRQS_OFF
1702 GET_THREAD_INFO(%rcx)
1703 testl %eax,%eax
1704 jne retint_kernel
1705 LOCKDEP_SYS_EXIT_IRQ
1706 movl TI_flags(%rcx),%edx
1707 movl $_TIF_WORK_MASK,%edi
1708 andl %edi,%edx
1709 jnz retint_careful
1710 jmp retint_swapgs
1711 CFI_ENDPROC
1712END(error_exit)
1713
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001714/*
1715 * Test if a given stack is an NMI stack or not.
1716 */
1717 .macro test_in_nmi reg stack nmi_ret normal_ret
1718 cmpq %\reg, \stack
1719 ja \normal_ret
1720 subq $EXCEPTION_STKSZ, %\reg
1721 cmpq %\reg, \stack
1722 jb \normal_ret
1723 jmp \nmi_ret
1724 .endm
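	/*
	 * Worked out: with %\reg holding the top of the NMI stack, the two
	 * unsigned compares branch to \nmi_ret exactly when
	 *	top - EXCEPTION_STKSZ <= \stack <= top,
	 * i.e. when the tested pointer lies within the NMI stack.
	 */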
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001725
1726 /* runs on exception stack */
1727ENTRY(nmi)
1728 INTR_FRAME
1729 PARAVIRT_ADJUST_EXCEPTION_FRAME
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001730 /*
1731 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1732 * the iretq it performs will take us out of NMI context.
1733 * This means that we can have nested NMIs where the next
1734 * NMI is using the top of the stack of the previous NMI. We
1735 * can't let it execute because the nested NMI will corrupt the
1736 * stack of the previous NMI. NMI handlers are not re-entrant
1737 * anyway.
1738 *
1739 * To handle this case we do the following:
1740 * Check a special location on the stack that contains
1741 * a variable that is set when NMIs are executing.
1742 * The interrupted task's stack is also checked to see if it
1743 * is an NMI stack.
1744 * If the variable is not set and the stack is not the NMI
1745 * stack then:
1746 * o Set the special variable on the stack
1747 * o Copy the interrupt frame into a "saved" location on the stack
1748 * o Copy the interrupt frame into a "copy" location on the stack
1749 * o Continue processing the NMI
1750 * If the variable is set or the previous stack is the NMI stack:
1751 * o Modify the "copy" location to jump to repeat_nmi
1752 * o return back to the first NMI
1753 *
1754 * Now, on exit of the first NMI, we first clear the stack variable.
1755 * The NMI stack will tell any nested NMIs at that point that it is
1756 * nested. Then we pop the stack normally with iret, and if there was
1757 * a nested NMI that updated the copied interrupt stack frame, a
1758 * jump will be made to the repeat_nmi code that will handle the second
1759 * NMI.
1760 */
1761
1762	/* Use %rdx as our temp variable throughout */
1763 pushq_cfi %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001764 CFI_REL_OFFSET rdx, 0
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001765
1766 /*
Steven Rostedt45d5a162012-02-19 16:43:37 -05001767 * If %cs was not the kernel segment, then the NMI triggered in user
1768 * space, which means it is definitely not nested.
1769 */
Steven Rostedta38449ef2012-02-20 15:29:34 -05001770 cmpl $__KERNEL_CS, 16(%rsp)
Steven Rostedt45d5a162012-02-19 16:43:37 -05001771 jne first_nmi
1772
1773 /*
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001774 * Check the special variable on the stack to see if NMIs are
1775 * executing.
1776 */
Steven Rostedta38449ef2012-02-20 15:29:34 -05001777 cmpl $1, -8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001778 je nested_nmi
1779
1780 /*
1781 * Now test if the previous stack was an NMI stack.
1782 * We need the double check: we check the NMI stack to handle the
1783 * race where the first NMI clears the variable before returning.
1784 * We check the variable because the first NMI could be in a
1785 * breakpoint routine using a breakpoint stack.
1786 */
1787 lea 6*8(%rsp), %rdx
1788 test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
Jan Beulich62610912012-02-24 14:54:37 +00001789 CFI_REMEMBER_STATE
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001790
1791nested_nmi:
1792 /*
1793 * Do nothing if we interrupted the fixup in repeat_nmi.
1794 * It's about to repeat the NMI handler, so we are fine
1795 * with ignoring this one.
1796 */
1797 movq $repeat_nmi, %rdx
1798 cmpq 8(%rsp), %rdx
1799 ja 1f
1800 movq $end_repeat_nmi, %rdx
1801 cmpq 8(%rsp), %rdx
1802 ja nested_nmi_out
1803
18041:
1805	/* Set up the interrupted NMI's stack to jump to repeat_nmi */
Salman Qazi28696f42012-10-01 17:29:25 -07001806 leaq -1*8(%rsp), %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001807 movq %rdx, %rsp
Salman Qazi28696f42012-10-01 17:29:25 -07001808 CFI_ADJUST_CFA_OFFSET 1*8
1809 leaq -10*8(%rsp), %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001810 pushq_cfi $__KERNEL_DS
1811 pushq_cfi %rdx
1812 pushfq_cfi
1813 pushq_cfi $__KERNEL_CS
1814 pushq_cfi $repeat_nmi
1815
1816 /* Put stack back */
Salman Qazi28696f42012-10-01 17:29:25 -07001817 addq $(6*8), %rsp
1818 CFI_ADJUST_CFA_OFFSET -6*8
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001819
1820nested_nmi_out:
1821 popq_cfi %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001822 CFI_RESTORE rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001823
1824 /* No need to check faults here */
1825 INTERRUPT_RETURN
1826
Jan Beulich62610912012-02-24 14:54:37 +00001827 CFI_RESTORE_STATE
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001828first_nmi:
1829 /*
1830 * Because nested NMIs will use the pushed location that we
1831 * stored in rdx, we must keep that space available.
1832 * Here's what our stack frame will look like:
1833 * +-------------------------+
1834 * | original SS |
1835 * | original Return RSP |
1836 * | original RFLAGS |
1837 * | original CS |
1838 * | original RIP |
1839 * +-------------------------+
1840 * | temp storage for rdx |
1841 * +-------------------------+
1842 * | NMI executing variable |
1843 * +-------------------------+
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001844 * | copied SS |
1845 * | copied Return RSP |
1846 * | copied RFLAGS |
1847 * | copied CS |
1848 * | copied RIP |
1849 * +-------------------------+
Salman Qazi28696f42012-10-01 17:29:25 -07001850 * | Saved SS |
1851 * | Saved Return RSP |
1852 * | Saved RFLAGS |
1853 * | Saved CS |
1854 * | Saved RIP |
1855 * +-------------------------+
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001856 * | pt_regs |
1857 * +-------------------------+
1858 *
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001859 * The saved stack frame is used to fix up the copied stack frame
1860 * that a nested NMI may change to make the interrupted NMI iret jump
1861 * to repeat_nmi. The original stack frame and the temp storage
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001862 * are also used by nested NMIs and cannot be trusted on exit.
1863 */
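	/*
	 * Offset check against the layout above: once the "Saved" frame
	 * has been pushed, %rsp points at the saved RIP, so the "NMI
	 * executing" variable sits at 10*8(%rsp), the same slot that
	 * repeat_nmi rewrites with "movq $1, 10*8(%rsp)" further down.
	 */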
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001864 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
Jan Beulich62610912012-02-24 14:54:37 +00001865 movq (%rsp), %rdx
1866 CFI_RESTORE rdx
1867
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001868 /* Set the NMI executing variable on the stack. */
1869 pushq_cfi $1
1870
Salman Qazi28696f42012-10-01 17:29:25 -07001871 /*
1872 * Leave room for the "copied" frame
1873 */
1874 subq $(5*8), %rsp
Jan Beulich444723d2013-01-24 09:27:31 +00001875 CFI_ADJUST_CFA_OFFSET 5*8
Salman Qazi28696f42012-10-01 17:29:25 -07001876
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001877 /* Copy the stack frame to the Saved frame */
1878 .rept 5
Salman Qazi28696f42012-10-01 17:29:25 -07001879 pushq_cfi 11*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001880 .endr
Jan Beulich62610912012-02-24 14:54:37 +00001881 CFI_DEF_CFA_OFFSET SS+8-RIP
1882
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001883 /* Everything up to here is safe from nested NMIs */
1884
Jan Beulich62610912012-02-24 14:54:37 +00001885 /*
1886 * If there was a nested NMI, the first NMI's iret will return
1887 * here. But NMIs are still enabled and we can take another
1888 * nested NMI. The nested NMI checks the interrupted RIP to see
1889 * if it is between repeat_nmi and end_repeat_nmi, and if so
1890 * it will just return, as we are about to repeat an NMI anyway.
1891 * This makes it safe to copy to the stack frame that a nested
1892 * NMI will update.
1893 */
1894repeat_nmi:
1895 /*
1896 * Update the stack variable to say we are still in NMI (the update
1897 * is benign for the non-repeat case, where 1 was pushed just above
1898 * to this very stack slot).
1899 */
Salman Qazi28696f42012-10-01 17:29:25 -07001900 movq $1, 10*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001901
1902 /* Make another copy, this one may be modified by nested NMIs */
Salman Qazi28696f42012-10-01 17:29:25 -07001903 addq $(10*8), %rsp
1904 CFI_ADJUST_CFA_OFFSET -10*8
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001905 .rept 5
Salman Qazi28696f42012-10-01 17:29:25 -07001906 pushq_cfi -6*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001907 .endr
Salman Qazi28696f42012-10-01 17:29:25 -07001908 subq $(5*8), %rsp
Jan Beulich62610912012-02-24 14:54:37 +00001909 CFI_DEF_CFA_OFFSET SS+8-RIP
1910end_repeat_nmi:
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001911
1912 /*
1913 * Everything below this point can be preempted by a nested
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001914 * NMI if the first NMI took an exception and reset our iret stack
1915 * so that we repeat another NMI.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001916 */
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001917 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
Jan Beulichb1cccb12010-09-02 13:55:11 +01001918 subq $ORIG_RAX-R15, %rsp
1919 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001920 /*
1921 * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
1922 * as we should not be calling schedule in NMI context,
1923 * even with normal interrupts enabled. An NMI should not be
1924 * setting NEED_RESCHED or anything that normal interrupts and
1925 * exceptions might do.
1926 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001927 call save_paranoid
1928 DEFAULT_FRAME 0
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001929
1930 /*
1931 * Save off the CR2 register. If we take a page fault in the NMI then
1932 * it could corrupt the CR2 value. If the NMI preempts a page fault
1933 * handler before it was able to read the CR2 register, and then the
1934 * NMI itself takes a page fault, the page fault that was preempted
1935 * will read the information from the NMI page fault and not the
1936 * original fault. Save it off and restore it if it changes.
1937 * Use the r12 callee-saved register.
1938 */
1939 movq %cr2, %r12
1940
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001941 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1942 movq %rsp,%rdi
1943 movq $-1,%rsi
1944 call do_nmi
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001945
1946 /* Did the NMI take a page fault? Restore cr2 if it did */
1947 movq %cr2, %rcx
1948 cmpq %rcx, %r12
1949 je 1f
1950 movq %r12, %cr2
19511:
1952
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001953 testl %ebx,%ebx /* swapgs needed? */
1954 jnz nmi_restore
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001955nmi_swapgs:
1956 SWAPGS_UNSAFE_STACK
1957nmi_restore:
Jan Beulich444723d2013-01-24 09:27:31 +00001958 /* Pop the extra iret frame at once */
1959 RESTORE_ALL 6*8
Salman Qazi28696f42012-10-01 17:29:25 -07001960
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001961 /* Clear the NMI executing stack variable */
Salman Qazi28696f42012-10-01 17:29:25 -07001962 movq $0, 5*8(%rsp)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001963 jmp irq_return
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001964 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001965END(nmi)
1966
1967ENTRY(ignore_sysret)
1968 CFI_STARTPROC
1969 mov $-ENOSYS,%eax
1970 sysret
1971 CFI_ENDPROC
1972END(ignore_sysret)
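/*
 * ignore_sysret is the MSR_CSTAR target installed by the CPU setup code
 * when CONFIG_IA32_EMULATION is off (an assumption about the C side, not
 * visible in this file): a stray 32-bit SYSCALL then just gets -ENOSYS.
 */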
1973
1974/*
1975 * End of kprobes section
1976 */
1977 .popsection