/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
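/*
 * Illustrative sketch of the 64-bit struct-return rules above as seen
 * from C. The type and function names here are hypothetical, chosen only
 * to show which registers carry what:
 *
 *	struct pair   { long a, b;    };   // 128 bits: returned in rax:rdx
 *	struct triple { long a, b, c; };   // wider than 128 bits: via memory
 *
 *	struct pair   make_pair(void);     // rax = a, rdx = b
 *	struct triple make_triple(long x); // caller passes a pointer to an
 *	                                   // on-stack return slot in rdi;
 *	                                   // x shifts up into rsi
 */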

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

/* The layout forms the "struct pt_regs" on the stack: */
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".
 */
#define R15		0*8
#define R14		1*8
#define R13		2*8
#define R12		3*8
#define RBP		4*8
#define RBX		5*8
/* These regs are callee-clobbered. Always saved on kernel entry. */
#define R11		6*8
#define R10		7*8
#define R9		8*8
#define R8		9*8
#define RAX		10*8
#define RCX		11*8
#define RDX		12*8
#define RSI		13*8
#define RDI		14*8
/*
 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:
 */
#define ORIG_RAX	15*8
/* Return frame for iretq */
#define RIP		16*8
#define CS		17*8
#define EFLAGS		18*8
#define RSP		19*8
#define SS		20*8

#define SIZEOF_PTREGS	21*8
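/*
 * Usage sketch (hypothetical, not a call site in this file): with %rsp
 * pointing at the bottom of the register frame, the offsets above index
 * straight into the saved pt_regs:
 *
 *	movq	RDI(%rsp), %rdi		# reload the saved user rdi
 *	movq	$0, RAX(%rsp)		# zero the saved rax (the return value)
 */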

.macro ALLOC_PT_GPREGS_ON_STACK
	addq	$-(15*8), %rsp
.endm

.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
	.if \r11
	movq %r11, 6*8+\offset(%rsp)
	.endif
	.if \r8910
	movq %r10, 7*8+\offset(%rsp)
	movq %r9, 8*8+\offset(%rsp)
	movq %r8, 9*8+\offset(%rsp)
	.endif
	.if \rax
	movq %rax, 10*8+\offset(%rsp)
	.endif
	.if \rcx
	movq %rcx, 11*8+\offset(%rsp)
	.endif
	movq %rdx, 12*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdi, 14*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset extra=0
.endm
.macro SAVE_C_REGS offset=0
	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
.endm
.macro SAVE_C_REGS_EXCEPT_R891011
	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RCX_R891011
	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
.endm
.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
.endm

.macro SAVE_EXTRA_REGS offset=0
	movq %r15, 0*8+\offset(%rsp)
	movq %r14, 1*8+\offset(%rsp)
	movq %r13, 2*8+\offset(%rsp)
	movq %r12, 3*8+\offset(%rsp)
	movq %rbp, 4*8+\offset(%rsp)
	movq %rbx, 5*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset
.endm
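/*
 * How the pieces above combine on an entry path (illustrative sketch;
 * the real users are in entry_64.S):
 *
 *	ALLOC_PT_GPREGS_ON_STACK	# room for the 15 GP registers
 *	SAVE_C_REGS			# caller-clobbered regs -> pt_regs
 *	SAVE_EXTRA_REGS			# callee-saved regs -> pt_regs
 *					# ... C code can now rely on pt_regs
 */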

.macro POP_EXTRA_REGS
	popq %r15
	popq %r14
	popq %r13
	popq %r12
	popq %rbp
	popq %rbx
.endm

.macro POP_C_REGS
	popq %r11
	popq %r10
	popq %r9
	popq %r8
	popq %rax
	popq %rcx
	popq %rdx
	popq %rsi
	popq %rdi
.endm

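/*
 * ICEBP (0xf1) is the undocumented "in-circuit emulator" breakpoint
 * instruction (aka int1): it raises #DB. It is emitted as a raw opcode
 * byte because not all assemblers know a mnemonic for it.
 */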
.macro icebp
	.byte 0xf1
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
 * the original rbp.
 */
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	.if \ptregs_offset
		leaq \ptregs_offset(%rsp), %rbp
	.else
		mov %rsp, %rbp
	.endif
	orq	$0x1, %rbp
#endif
.endm
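/*
 * Decoding sketch (hypothetical C, mirroring what the kernel unwinder
 * does with the encoded value):
 *
 *	if (rbp & 0x1) {
 *		// Low bit set: not a real frame pointer, but a pt_regs
 *		// pointer in disguise.
 *		struct pt_regs *regs = (struct pt_regs *)(rbp & ~0x1UL);
 *	}
 */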

#ifdef CONFIG_PAGE_TABLE_ISOLATION

/* PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two halves: */
#define PTI_SWITCH_MASK (1<<PAGE_SHIFT)
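/*
 * With PAGE_SHIFT == 12 the mask is bit 12 (0x1000): the user PGD lives
 * one page above the kernel PGD, so user CR3 == kernel CR3 + 4096.
 */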

.macro ADJUST_KERNEL_CR3 reg:req
	/* Clear "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_SWITCH_MASK), \reg
.endm

.macro ADJUST_USER_CR3 reg:req
	/* Move CR3 up a page to the user page tables: */
	orq	$(PTI_SWITCH_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_USER_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
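/*
 * Typical pairing (illustrative sketch; the real call sites are in the
 * entry code, and the scratch register choice here is arbitrary):
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	# on entry from user space
 *	...					# kernel work
 *	SWITCH_TO_USER_CR3 scratch_reg=%rdi	# just before returning to user
 */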

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Is the "switch" bit zero?  If so, we are already running on the
	 * kernel CR3 and there is nothing to switch.
	 */
	testq	$(PTI_SWITCH_MASK), \scratch_reg
	jz	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

.macro RESTORE_CR3 save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	/*
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
	 */
	movq	\save_reg, %cr3
.Lend_\@:
.endm
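/*
 * Pairing sketch for the save/restore variant (illustrative register
 * choices): paths that can hit the kernel with either CR3 live (NMI-style
 * entries) stash the current value and put it back verbatim on the way out:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...
 *	RESTORE_CR3 save_reg=%r14
 */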

#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3 scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro RESTORE_CR3 save_reg:req
.endm

#endif

#endif /* CONFIG_X86_64 */

/*
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.
 */
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
	call enter_from_user_mode
.Lafter_call_\@:
#endif
.endm
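/*
 * Illustrative placement (the real call sites are in the entry code):
 * used after registers are saved on entry from user space, before any C
 * code that cares about context tracking runs, e.g.:
 *
 *	TRACE_IRQS_OFF
 *	CALL_enter_from_user_mode
 */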