/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

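/*
 * ALIGN_DESTINATION: copy single bytes until %rdi is 8-byte aligned.
 * Decrements %edx by the number of bytes copied and clobbers %ecx.
 * On a fault, the fixup adds the not-yet-copied alignment bytes back
 * into %edx and branches to copy_user_handle_tail.
 */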
	.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(100b,103b)
	_ASM_EXTABLE(101b,103b)
	.endm

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	CFI_STARTPROC
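	/* reject ranges that wrap or extend past the thread's addr_limit */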
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
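	/*
	 * The jmp below is patched at boot: CPUs with fast rep movsq
	 * (X86_FEATURE_REP_GOOD) get copy_user_generic_string, CPUs
	 * with enhanced rep movsb (X86_FEATURE_ERMS) get
	 * copy_user_enhanced_fast_string, everything else keeps the
	 * unrolled copy.
	 */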
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
	CFI_ENDPROC
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
	CFI_ENDPROC
ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/*
	 * Must zero the whole destination: the range check failed
	 * before anything was copied, and the caller must never see
	 * stale data in the buffer.
	 */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
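	/* SMAP: stac opens the user-access window; clac closes it before ret */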
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
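	/* %ecx = number of 64-byte chunks, low 6 bits stay in %edx for the tail */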
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
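	/* tail: copy the remaining whole qwords, then single bytes */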
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
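	/*
	 * Fixups: 30 faulted in the 64-byte loop (%ecx counts 64-byte
	 * chunks, so rescale to bytes), 40 in the qword loop (%ecx
	 * counts qwords), 50 in the byte loop (%ecx already counts
	 * bytes).  All paths hand the uncopied count to
	 * copy_user_handle_tail in %edx.
	 */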
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to lift
 * this limit, please take those errata into account.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
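	/* %ecx = whole qwords for rep movsq, %edx = trailing bytes for rep movsb */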
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
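	/*
	 * 11 faulted during rep movsq: %rcx qwords were still to copy,
	 * plus the %edx tail bytes.  12 faulted during rep movsb with
	 * the remaining byte count already in %ecx.
	 */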
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)

/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use them when they are available.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
	CFI_STARTPROC
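	/* ERMS: a single rep movsb handles the whole copy and is fast
	 * even for short or unaligned buffers */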
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)