/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
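/*
 * For reference, this layout mirrors the 32-bit struct pt_regs in
 * arch/x86/include/asm/ptrace.h (shown here as a sketch; the header
 * is authoritative):
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned short ds, __dsh, es, __esh, fs, __fsh, gs, __gsh;
 *		unsigned long orig_ax, ip;
 *		unsigned short cs, __csh;
 *		unsigned long flags, sp;
 *		unsigned short ss, __ssh;
 *	};
 */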

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
#include <asm/frame.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel only uses it for the
 * stack canary, which GCC requires to be at %gs:20.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
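/*
 * For illustration only (this sequence is emitted by the compiler,
 * not by this file): a -fstack-protector function on 32-bit x86
 * typically reads and checks the canary roughly like this:
 *
 *	movl	%gs:20, %eax		# prologue: load the canary
 *	movl	%eax, -12(%ebp)		# stash a copy in the frame
 *	...
 *	movl	-12(%ebp), %eax		# epilogue: reload the copy
 *	xorl	%gs:20, %eax		# compare against the original
 *	jne	__stack_chk_fail	# mismatch: stack smashed
 *
 * The frame offset is compiler-chosen; only %gs:20 is fixed.
 */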
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original %ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov	%esp, %ebp
	orl	$0x1, %ebp
#endif
.endm
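/*
 * The decode side, as a sketch (the real logic lives in the unwinder,
 * not here): a saved frame-pointer value with the LSB set marks the
 * start of pt_regs rather than a normal stack frame:
 *
 *	if (bp & 1)
 *		regs = (struct pt_regs *)(bp & ~1UL);
 */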

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
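	/*
	 * For reference (a sketch; asm/switch_to.h is authoritative),
	 * the 32-bit struct inactive_task_frame these pushes build is:
	 *
	 *	struct inactive_task_frame {
	 *		unsigned long si, di, bx, bp;
	 *		unsigned long ret_addr;
	 *	};
	 *
	 * The call to __switch_to_asm supplies ret_addr; the pushes
	 * below fill the fields from bp down to si.
	 */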
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_CC_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function, so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	call	*%ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path)?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in EFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	user stack
 * 0(%ebp) arg6
 */
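/*
 * For context, a sketch of how the vDSO reaches this entry point (see
 * arch/x86/entry/vdso/vdso32/system_call.S for the real code): the
 * user registers are stashed and %ebp is repurposed to snapshot the
 * user stack pointer, which is why arg6 lives at 0(%ebp):
 *
 *	pushl	%ecx			# save user %ecx
 *	pushl	%edx			# save user %edx
 *	pushl	%ebp			# save user %ebp (arg6 ends up here)
 *	movl	%esp, %ebp		# SYSENTER destroys %esp
 *	sysenter
 */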
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF.  (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method.  Restarted 32-bit system calls also fall
 * back to INT $0x80 regardless of what instruction was originally
 * used to do the system call.  (64-bit programs can use INT $0x80
 * as well, but they can only run on 64-bit kernels and therefore
 * land in entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	arg6
 */
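/*
 * Illustrative only (no such code exists in this file): a userspace
 * write(2) issued through this legacy gate looks like:
 *
 *	movl	$4, %eax		# __NR_write (i386 ABI)
 *	movl	$1, %ebx		# arg1: fd
 *	movl	$buf, %ecx		# arg2: buffer
 *	movl	$len, %edx		# arg3: count
 *	int	$0x80
 */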
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	.Lldt_ss			# returning to user-space with LDT SS
#endif
.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
.Lirq_return:
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
.Lldt_ss:
/*
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16-bit stack.  The CPU will not
 * restore the high word of ESP for us on executing iret...  This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy.  We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that accounts for the difference.
 */
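/*
 * Worked example with made-up values: if the kernel %esp is 0xc2157f40
 * and the saved userspace %esp is 0x00abcdef, then below:
 *
 *	%edx = 0xc2157f40		(kernel esp)
 *	%eax = 0x00ab7f40		(user high word, kernel low word)
 *	%edx - %eax = 0xc16a0000	(low word is always 0)
 *
 * 0xc16a0000 becomes the ESPFIX segment base, so base + %eax still
 * addresses the kernel stack (0xc2157f40), while the high word of
 * %esp already holds the user value (0x00ab) that a 16-bit iret
 * would otherwise fail to restore.
 */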
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack.  This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
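/*
 * Continuing the made-up example from .Lldt_ss: with the ESPFIX base
 * 0xc16a0000 programmed in the GDT, the shl/addl below rebuild
 * %eax = base + %esp, i.e. the flat-segment alias of the current
 * stack pointer, and the lss lands us back on the zero-based stack.
 */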
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
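/*
 * Worked example of the vector encoding above: for vector 0x20
 * (a typical FIRST_EXTERNAL_VECTOR), ~0x20 + 0x80 = 0x5f is pushed,
 * which fits in a signed byte and so keeps every stub's pushl at two
 * bytes.  common_interrupt's addl $-0x80 then yields 0xffffffdf,
 * i.e. ~0x20, from which do_IRQ recovers the vector number.
 */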

/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * the iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error.  We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/*
	 * EAX == 0 => Category 1 (Bad segment)
	 * EAX != 0 => Category 2 (Bad IRET)
	 */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is.  We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)	/* save return ip into ip slot */
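	/*
	 * Stack at this point, as a sketch (top first):
	 *	 0(%esp)  return ip (copy)	-> becomes regs->ip
	 *	 4(%esp)  flags			-> sits in the regs->cs slot,
	 *					   moved to regs->flags below
	 *	 8(%esp)  return ip (original)	-> overwritten with flags below
	 */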

	pushl	$0	/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax	/* Get the saved flags */
	movl	%eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
					/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax	/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
	movl	0x4(%ebp), %edx		/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx	/* Save ftrace_pos in 3rd parameter */
	pushl	%esp			/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp		/* Skip pt_regs */
	movl	14*4(%esp), %eax	/* Move flags back into cs */
	movl	%eax, 13*4(%esp)	/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax	/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)	/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp		/* Skip orig_ax and ip */
	popf				/* Pop flags at end (no addl to corrupt flags) */
	jmp	.Lftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */


ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	.Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
.Ltrace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	common_exception
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
	jmp	common_exception
END(page_fault)

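/*
 * Every exception stub above pushes an error code (or $0) and then
 * the address of its C handler before jumping here, so once the
 * register pushes below are done, the handler address sits in the
 * slot PT_GS would normally occupy and the error code in PT_ORIG_EAX.
 */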
common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	ENCODE_FRAME_POINTER
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * #DB can happen at the first instruction of
	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
	 * happens, then we will be running on a very small stack.  We
	 * need to detect this condition and switch to the thread
	 * stack before calling any C code at all.
	 *
	 * If you edit this code, keep in mind that NMIs can happen in here.
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Ldebug_from_sysenter_stack
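	/*
	 * Note the check above is unsigned: an %esp inside the SYSENTER
	 * stack gives 0 <= end - %esp < SIZEOF_SYSENTER_stack, while any
	 * %esp above the end wraps around to a huge unsigned value.
	 */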

	TRACE_IRQS_OFF
	call	do_debug
	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call	do_debug
	movl	%ebx, %esp
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	.Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp
	jmp	.Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * Create the ss:esp pointer used by lss below to switch back
	 * to the kernel stack.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
END(nmi)


ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception
END(async_page_fault)
#endif

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
END(rewind_stack_do_exit)