/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

For 32-bit we have the following conventions - kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
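
/*
 * Illustration of the [**] 64-bit case above (a sketch only; the type
 * and function names are made up, not taken from the kernel):
 *
 *	struct two_q   { u64 a, b;    };	128 bits: returned in rax:rdx
 *	struct three_q { u64 a, b, c; };	wider: returned through memory
 *
 *	struct three_q f(long x, long y);
 *
 * is called as if it were
 *
 *	struct three_q *f(struct three_q *ret, long x, long y);
 *
 * i.e. rdi carries the hidden pointer to the caller-allocated return
 * slot and the visible arguments shift up: x in rsi, y in rdx.
 */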

#include <asm/dwarf2.h>

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

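/*
 * Note: these offsets should match the kernel's view of struct pt_regs
 * in <asm/ptrace.h>, so a full frame built by SAVE_ARGS + SAVE_REST
 * below is laid out like a struct pt_regs.
 */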
#define R15		  0
#define R14		  8
#define R13		 16
#define R12		 24
#define RBP		 32
#define RBX		 40

/* arguments: interrupts/non-tracing syscalls only save up to here: */
#define R11		 48
#define R10		 56
#define R9		 64
#define R8		 72
#define RAX		 80
#define RCX		 88
#define RDX		 96
#define RSI		104
#define RDI		112
#define ORIG_RAX	120	/* + error_code */
/* end of arguments */

/* cpu exception frame or undefined in case of fast syscall: */
#define RIP		128
#define CS		136
#define EFLAGS		144
#define RSP		152
#define SS		160

#define ARGOFFSET	R11

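/*
 * Save the "argument" half of the register set (rdi-rcx, rax, r8-r11)
 * at the offsets defined above, starting at ARGOFFSET.  With
 * rax_enosys=1 the saved rax slot is preloaded with -ENOSYS instead of
 * the live rax value, so that the saved pt_regs->ax reads as -ENOSYS
 * while the syscall is still in flight.
 */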
	.macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
	subq  $9*8+\addskip, %rsp
	CFI_ADJUST_CFA_OFFSET	9*8+\addskip
	movq_cfi rdi, 8*8
	movq_cfi rsi, 7*8
	movq_cfi rdx, 6*8

	.if \save_rcx
	movq_cfi rcx, 5*8
	.endif

	.if \rax_enosys
	movq $-ENOSYS, 4*8(%rsp)
	.else
	movq_cfi rax, 4*8
	.endif

	.if \save_r891011
	movq_cfi r8,  3*8
	movq_cfi r9,  2*8
	movq_cfi r10, 1*8
	movq_cfi r11, 0*8
	.endif

	.endm

#define ARG_SKIP	(9*8)

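/*
 * Undo SAVE_ARGS: restore the saved registers (each group individually
 * switchable, e.g. for paths that want to keep rax as the return value)
 * and pop the frame.
 */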
	.macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
			    rstor_r8910=1, rstor_rdx=1
	.if \rstor_r11
	movq_cfi_restore 0*8, r11
	.endif

	.if \rstor_r8910
	movq_cfi_restore 1*8, r10
	movq_cfi_restore 2*8, r9
	movq_cfi_restore 3*8, r8
	.endif

	.if \rstor_rax
	movq_cfi_restore 4*8, rax
	.endif

	.if \rstor_rcx
	movq_cfi_restore 5*8, rcx
	.endif

	.if \rstor_rdx
	movq_cfi_restore 6*8, rdx
	.endif

	movq_cfi_restore 7*8, rsi
	movq_cfi_restore 8*8, rdi

	.if ARG_SKIP+\addskip > 0
	addq $ARG_SKIP+\addskip, %rsp
	CFI_ADJUST_CFA_OFFSET	-(ARG_SKIP+\addskip)
	.endif
	.endm

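/*
 * Reload the argument registers from an existing save frame; \offset is
 * the distance from %rsp to the saved r11 slot.  Note that rax, when
 * not skipped, is reloaded from \offset+72 - the orig_rax slot rather
 * than the rax slot, i.e. the saved syscall number in the syscall path.
 */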
	.macro LOAD_ARGS offset, skiprax=0
	movq \offset(%rsp),    %r11
	movq \offset+8(%rsp),  %r10
	movq \offset+16(%rsp), %r9
	movq \offset+24(%rsp), %r8
	movq \offset+40(%rsp), %rcx
	movq \offset+48(%rsp), %rdx
	movq \offset+56(%rsp), %rsi
	movq \offset+64(%rsp), %rdi
	.if \skiprax
	.else
	movq \offset+72(%rsp), %rax
	.endif
	.endm

#define REST_SKIP	(6*8)

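/*
 * Save the callee-saved registers below an existing SAVE_ARGS frame,
 * completing the register layout defined above; RESTORE_REST undoes it.
 */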
	.macro SAVE_REST
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET	REST_SKIP
	movq_cfi rbx, 5*8
	movq_cfi rbp, 4*8
	movq_cfi r12, 3*8
	movq_cfi r13, 2*8
	movq_cfi r14, 1*8
	movq_cfi r15, 0*8
	.endm

	.macro RESTORE_REST
	movq_cfi_restore 0*8, r15
	movq_cfi_restore 1*8, r14
	movq_cfi_restore 2*8, r13
	movq_cfi_restore 3*8, r12
	movq_cfi_restore 4*8, rbp
	movq_cfi_restore 5*8, rbx
	addq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET	-(REST_SKIP)
	.endm

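/* SAVE_ALL/RESTORE_ALL build and tear down the complete frame in one go. */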
	.macro SAVE_ALL
	SAVE_ARGS
	SAVE_REST
	.endm

	.macro RESTORE_ALL addskip=0
	RESTORE_REST
	RESTORE_ARGS 1, \addskip
	.endm

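/*
 * 0xf1 is the undocumented ICEBP/INT1 instruction; it is emitted as a
 * raw byte since the assembler may not know the mnemonic.
 */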
	.macro icebp
	.byte 0xf1
	.endm

#else /* CONFIG_X86_64 */

/*
 * For 32-bit, only simplified versions of SAVE_ALL/RESTORE_ALL are
 * provided.  They differ from the entry_32.S versions in that they do
 * not touch the segment registers, so they are only suitable for
 * in-kernel use, not for transitions from or to user space.  The
 * resulting stack frame is not a standard pt_regs frame.  The main use
 * case is calling C code from assembler when all registers need to be
 * preserved.
 */

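/*
 * A minimal usage sketch (hypothetical callsite, names made up):
 *
 *	SAVE_ALL
 *	call	my_c_helper		# all GPRs survive the call
 *	RESTORE_ALL
 *	ret
 */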
	.macro SAVE_ALL
	pushl_cfi_reg eax
	pushl_cfi_reg ebp
	pushl_cfi_reg edi
	pushl_cfi_reg esi
	pushl_cfi_reg edx
	pushl_cfi_reg ecx
	pushl_cfi_reg ebx
	.endm

	.macro RESTORE_ALL
	popl_cfi_reg ebx
	popl_cfi_reg ecx
	popl_cfi_reg edx
	popl_cfi_reg esi
	popl_cfi_reg edi
	popl_cfi_reg ebp
	popl_cfi_reg eax
	.endm

#endif /* CONFIG_X86_64 */