/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */
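
/*
 * For reference: the frame described above is just struct pt_regs from
 * arch/x86/include/asm/ptrace.h, built up by hand.  A simplified sketch
 * (the real definition splits each segment slot into a 16-bit selector
 * plus explicit padding):
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned long ds, es, fs, gs;
 *		unsigned long orig_ax;
 *		unsigned long ip, cs, flags, sp, ss;
 *	};
 *
 * The PT_* offsets used throughout this file (PT_EBX, PT_EFLAGS, ...)
 * are generated from that struct by asm-offsets.c.
 */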

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

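/*
 * On a native (non-paravirt) build those macros collapse to the obvious
 * instructions; roughly, per asm/irqflags.h and friends:
 *
 *	#define DISABLE_INTERRUPTS(x)	cli
 *	#define ENABLE_INTERRUPTS(x)	sti
 *	#define INTERRUPT_RETURN	iret
 *
 * Under CONFIG_PARAVIRT they become patchable indirect calls instead,
 * which is why the clobber annotations above matter.
 */
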
#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel only uses it for the stack
 * canary, which gcc requires to be at %gs:20.  Read the comment at
 * the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
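
/*
 * To illustrate the %gs:20 requirement: with -fstack-protector, gcc
 * brackets each protected function with code along these lines (a
 * sketch of compiler output, not part of this file):
 *
 *	movl	%gs:20, %eax
 *	movl	%eax, -4(%ebp)		# stash the canary in the frame
 *	...
 *	movl	-4(%ebp), %edx
 *	xorl	%gs:20, %edx		# canary still intact?
 *	je	1f
 *	call	__stack_chk_fail	# no: abort
 * 1:
 */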
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm
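
/*
 * The 1:/2:/3: labels, the .fixup snippets and _ASM_EXTABLE above are
 * the standard exception-table pattern.  Roughly (recent kernels append
 * a third, handler field), _ASM_EXTABLE(from, to) expands to:
 *
 *	.pushsection __ex_table, "a"
 *	.balign	4
 *	.long	(from) - .	# IP-relative faulting instruction
 *	.long	(to) - .	# IP-relative fixup code
 *	.popsection
 *
 * So if popping a segment register faults (say, a stale LDT selector),
 * the trap handler finds the faulting IP in __ex_table and resumes at
 * the fixup, which pops a harmless 0 into the register instead.
 */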

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl

	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)

	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if so,
	 * consider switching it over to ret_from_fork.
	 */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
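/*
 * For context, the vDSO side of this handshake looks roughly like this
 * (see vdso/vdso32/system_call.S in this source tree; sketch only):
 *
 *	__kernel_vsyscall:
 *		pushl	%ecx
 *		pushl	%edx
 *		pushl	%ebp
 *		movl	%esp, %ebp	# SYSENTER destroys ESP; carry it in EBP
 *		sysenter
 *
 * which is why %ebp, not %esp, supplies pt_regs->sp below and why the
 * return path can count on the vDSO to pop ecx/edx afterwards.
 */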
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit
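
	/*
	 * SYSEXIT restores almost nothing itself: it loads EIP from %edx
	 * and ESP from %ecx, with flat CS/SS derived from
	 * MSR_IA32_SYSENTER_CS, which is why ip and sp were staged in
	 * those registers above.
	 */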

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform
 * system calls.  Instances of INT $0x80 can be found inline in
 * various programs and libraries.  It is also used by the vDSO's
 * __kernel_vsyscall fallback for hardware that doesn't support a
 * faster entry method.  Restarted 32-bit system calls also fall
 * back to INT $0x80 regardless of what instruction was originally
 * used to do the system call.  (64-bit programs can use INT $0x80
 * as well, but they can only run on 64-bit kernels and therefore
 * land in entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
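/*
 * A minimal user-space caller of this entry point, for illustration
 * only (20 is __NR_getpid in the 32-bit ABI):
 *
 *	movl	$20, %eax	# eax = system call number
 *	int	$0x80		# traps into entry_INT80_32
 *	# on return, %eax holds the pid (or -errno on failure)
 */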
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on.  Unlike the 64-bit
	 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
	 * are already on (unless user code is messing around with iopl).
	 */

	movl	%esp, %eax
	call	do_syscall_32_irqs_on
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
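
/*
 * Worked example of the encoding above: for vector 0x20,
 * ~0x20 + 0x80 = 0x5f; for vector 0xff, ~0xff + 0x80 = -0x80.  Every
 * vector in [0x20, 0xff] thus pushes a signed byte in [-0x80, 0x5f],
 * keeping each stub (pushl imm8 + jmp) within its 8-byte slot.
 * common_interrupt's "addl $-0x80" below undoes the bias, recovering
 * ~vector for do_IRQ.
 */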

/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * the iret instruction's behaviour, where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)
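
/*
 * With CONFIG_DYNAMIC_FTRACE the "call mcount" that gcc -pg plants at
 * every function entry is rewritten to a 5-byte NOP at boot, so the
 * stub above only runs in the window before that patching.  When
 * tracing is enabled, selected call sites are live-patched to call
 * ftrace_caller (or ftrace_regs_caller) below instead.
 */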

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is.  We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif
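
/*
 * How the graph tracer gets back: prepare_ftrace_return(), invoked from
 * ftrace_graph_caller with the address of the parent return slot in
 * %edx, replaces the traced function's return address on the stack with
 * return_to_handler.  The eventual "ret" therefore lands there, and
 * ftrace_return_to_handler() supplies the original return address,
 * which return_to_handler jumps through via %ecx.
 */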

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)

ENTRY(debug)
	/*
	 * #DB can happen at the first instruction of
	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
	 * happens, then we will be running on a very small stack.  We
	 * need to detect this condition and switch to the thread
	 * stack before calling any C code at all.
	 *
	 * If you edit this code, keep in mind that NMIs can happen in here.
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Ldebug_from_sysenter_stack

	TRACE_IRQS_OFF
	call	do_debug
	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
	movl	%esp, %ebp
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call	do_debug
	movl	%ebp, %esp
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
	cmpl	$SIZEOF_SYSENTER_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	restore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebp
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebp, %esp
	jmp	restore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * Create the pointer to the original stack for lss to switch back to.
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif