/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
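
/*
 * All user-copy entry points below open a user-access window with
 * ASM_STAC and close it with ASM_CLAC.  On SMAP-capable CPUs these
 * expand to stac/clac; on older CPUs the alternatives machinery
 * patches them out.
 */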

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
        mov PER_CPU_VAR(current_task), %rax
        movq %rdi,%rcx
        addq %rdx,%rcx
        jc bad_to_user
        cmpq TASK_addr_limit(%rax),%rcx
        ja bad_to_user
        ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
                      "jmp copy_user_generic_string",           \
                      X86_FEATURE_REP_GOOD,                     \
                      "jmp copy_user_enhanced_fast_string",     \
                      X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
EXPORT_SYMBOL(_copy_to_user)
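
/*
 * The ALTERNATIVE_2 above is resolved once at boot: the jmp is patched
 * to copy_user_generic_string on CPUs with X86_FEATURE_REP_GOOD, to
 * copy_user_enhanced_fast_string on CPUs with X86_FEATURE_ERMS, and is
 * left pointing at copy_user_generic_unrolled otherwise.
 *
 * A rough sketch of the C-side caller (illustrative only, not the
 * exact uaccess.h code of any particular kernel version):
 *
 *      if (copy_to_user(uptr, kbuf, len))   // returns bytes NOT copied
 *              return -EFAULT;
 */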

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
        mov PER_CPU_VAR(current_task), %rax
        movq %rsi,%rcx
        addq %rdx,%rcx
        jc bad_from_user
        cmpq TASK_addr_limit(%rax),%rcx
        ja bad_from_user
        ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
                      "jmp copy_user_generic_string",           \
                      X86_FEATURE_REP_GOOD,                     \
                      "jmp copy_user_enhanced_fast_string",     \
                      X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
EXPORT_SYMBOL(_copy_from_user)

        .section .fixup,"ax"
        /* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
        movl %edx,%ecx
        xorl %eax,%eax
        rep
        stosb
bad_to_user:
        movl %edx,%eax
        ret
ENDPROC(bad_from_user)
        .previous
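
/*
 * Note: bad_from_user and bad_to_user are reached only from the range
 * checks above, before any bytes have moved, so %rdi and %rdx still
 * hold the original destination and count.  bad_from_user clears the
 * whole kernel destination buffer so callers never act on stale
 * kernel memory; there is nothing to clear for bad_to_user.  Both
 * return the full count in %eax, meaning "nothing was copied".
 */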

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
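
/*
 * Worked example: a 200-byte copy runs the 64-byte loop three times
 * (192 bytes), then one 8-byte iteration, leaving no trailing bytes
 * for the final byte loop (200 = 3*64 + 1*8 + 0).
 */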
ENTRY(copy_user_generic_unrolled)
        ASM_STAC
        cmpl $8,%edx
        jb 20f          /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        andl $63,%edx
        shrl $6,%ecx
        jz 17f
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movq %r8,(%rdi)
6:      movq %r9,1*8(%rdi)
7:      movq %r10,2*8(%rdi)
8:      movq %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movq %r8,4*8(%rdi)
14:     movq %r9,5*8(%rdi)
15:     movq %r10,6*8(%rdi)
16:     movq %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz 1b
17:     movl %edx,%ecx
        andl $7,%edx
        shrl $3,%ecx
        jz 20f
18:     movq (%rsi),%r8
19:     movq %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz 18b
20:     andl %edx,%edx
        jz 23f
        movl %edx,%ecx
21:     movb (%rsi),%al
22:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz 21b
23:     xor %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
30:     shll $6,%ecx
        addl %ecx,%edx
        jmp 60f
40:     leal (%rdx,%rcx,8),%edx
        jmp 60f
50:     movl %ecx,%edx
60:     jmp copy_user_handle_tail /* ecx is zerorest also */
        .previous

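/*
 * Fault recovery: each fixup recomputes the not-yet-copied byte count
 * from the loop state (at 30:, %ecx*64 plus the tail; at 40:, %ecx*8
 * plus the tail; at 50:, plain %ecx) and jumps to
 * copy_user_handle_tail, a C helper that retries the remainder byte
 * by byte and returns the final uncopied count in %eax.
 *
 * The _ASM_EXTABLE(from,to) entries below put each potentially
 * faulting instruction into the exception table; on a fault, the page
 * fault handler resumes execution at the matching fixup label.
 */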
        _ASM_EXTABLE(1b,30b)
        _ASM_EXTABLE(2b,30b)
        _ASM_EXTABLE(3b,30b)
        _ASM_EXTABLE(4b,30b)
        _ASM_EXTABLE(5b,30b)
        _ASM_EXTABLE(6b,30b)
        _ASM_EXTABLE(7b,30b)
        _ASM_EXTABLE(8b,30b)
        _ASM_EXTABLE(9b,30b)
        _ASM_EXTABLE(10b,30b)
        _ASM_EXTABLE(11b,30b)
        _ASM_EXTABLE(12b,30b)
        _ASM_EXTABLE(13b,30b)
        _ASM_EXTABLE(14b,30b)
        _ASM_EXTABLE(15b,30b)
        _ASM_EXTABLE(16b,30b)
        _ASM_EXTABLE(18b,40b)
        _ASM_EXTABLE(19b,40b)
        _ASM_EXTABLE(21b,50b)
        _ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
EXPORT_SYMBOL(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Going beyond that would also be dangerous: both Intel and AMD have
 * errata with rep movsq > 4GB.  Anyone who wants to lift the limit
 * needs to account for those errata.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
        ASM_STAC
        cmpl $8,%edx
        jb 2f           /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
        movl %edx,%ecx
        shrl $3,%ecx
        andl $7,%edx
1:      rep
        movsq
2:      movl %edx,%ecx
3:      rep
        movsb
        xorl %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
11:     leal (%rdx,%rcx,8),%ecx
12:     movl %ecx,%edx          /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous
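
/*
 * If the "rep movsq" at 1: faults, %rcx holds the quadwords still to
 * move, so 11: rebuilds the remaining byte count as %rcx*8 plus the
 * tail in %edx.  A fault in the "rep movsb" at 3: lands directly on
 * 12:, where %ecx already counts the remaining bytes.
 */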

        _ASM_EXTABLE(1b,11b)
        _ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
EXPORT_SYMBOL(copy_user_generic_string)

/*
 * Some CPUs provide enhanced REP MOVSB/STOSB instructions (ERMS).
 * Using enhanced REP MOVSB/STOSB is recommended when it is available.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_enhanced_fast_string)
        ASM_STAC
        movl %edx,%ecx
1:      rep
        movsb
        xorl %eax,%eax
        ASM_CLAC
        ret

        .section .fixup,"ax"
12:     movl %ecx,%edx          /* ecx is zerorest also */
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
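
/*
 * Note there is no ALIGN_DESTINATION here: with ERMS a single
 * "rep movsb" is expected to handle arbitrary alignment and length
 * well, so the byte count is used as-is and the only fixup needed is
 * the one above (%ecx holds the bytes still to copy on a fault).
 */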

/*
 * copy_user_nocache - Uncached memory copy with exception handling.
 * The destination is written with non-temporal stores, which keeps a
 * large copy from evicting useful data from the cache.
 *
 * Note: Cached memory copy is used when destination or size is not
 * naturally aligned. That is:
 *  - Require 8-byte alignment when size is 8 bytes or larger.
 *  - Require 4-byte alignment when size is 4 bytes.
 */
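
/*
 * Worked example: a 30-byte copy to an 8-byte-aligned destination
 * does no 64-byte iterations, three 8-byte movnti stores (24 bytes),
 * one 4-byte movnti store, and two ordinary cached byte stores
 * (30 = 3*8 + 1*4 + 2).
 */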
ENTRY(__copy_user_nocache)
        ASM_STAC

        /* If size is less than 8 bytes, go to 4-byte copy */
        cmpl $8,%edx
        jb .L_4b_nocache_copy_entry

        /* If destination is not 8-byte aligned, "cache" copy to align it */
        ALIGN_DESTINATION

        /* Set 4x8-byte copy count and remainder */
        movl %edx,%ecx
        andl $63,%edx
        shrl $6,%ecx
        jz .L_8b_nocache_copy_entry     /* jump if count is 0 */

        /* Perform 4x8-byte nocache loop-copy */
.L_4x8b_nocache_copy_loop:
1:      movq (%rsi),%r8
2:      movq 1*8(%rsi),%r9
3:      movq 2*8(%rsi),%r10
4:      movq 3*8(%rsi),%r11
5:      movnti %r8,(%rdi)
6:      movnti %r9,1*8(%rdi)
7:      movnti %r10,2*8(%rdi)
8:      movnti %r11,3*8(%rdi)
9:      movq 4*8(%rsi),%r8
10:     movq 5*8(%rsi),%r9
11:     movq 6*8(%rsi),%r10
12:     movq 7*8(%rsi),%r11
13:     movnti %r8,4*8(%rdi)
14:     movnti %r9,5*8(%rdi)
15:     movnti %r10,6*8(%rdi)
16:     movnti %r11,7*8(%rdi)
        leaq 64(%rsi),%rsi
        leaq 64(%rdi),%rdi
        decl %ecx
        jnz .L_4x8b_nocache_copy_loop

        /* Set 8-byte copy count and remainder */
.L_8b_nocache_copy_entry:
        movl %edx,%ecx
        andl $7,%edx
        shrl $3,%ecx
        jz .L_4b_nocache_copy_entry     /* jump if count is 0 */

        /* Perform 8-byte nocache loop-copy */
.L_8b_nocache_copy_loop:
20:     movq (%rsi),%r8
21:     movnti %r8,(%rdi)
        leaq 8(%rsi),%rsi
        leaq 8(%rdi),%rdi
        decl %ecx
        jnz .L_8b_nocache_copy_loop

        /* If no bytes left, we're done */
.L_4b_nocache_copy_entry:
        andl %edx,%edx
        jz .L_finish_copy

        /* If destination is not 4-byte aligned, go to byte copy: */
        movl %edi,%ecx
        andl $3,%ecx
        jnz .L_1b_cache_copy_entry

        /* Set 4-byte copy count (1 or 0) and remainder */
        movl %edx,%ecx
        andl $3,%edx
        shrl $2,%ecx
        jz .L_1b_cache_copy_entry       /* jump if count is 0 */

        /* Perform 4-byte nocache copy: */
30:     movl (%rsi),%r8d
31:     movnti %r8d,(%rdi)
        leaq 4(%rsi),%rsi
        leaq 4(%rdi),%rdi

        /* If no bytes left, we're done: */
        andl %edx,%edx
        jz .L_finish_copy

        /* Perform byte "cache" loop-copy for the remainder */
.L_1b_cache_copy_entry:
        movl %edx,%ecx
.L_1b_cache_copy_loop:
40:     movb (%rsi),%al
41:     movb %al,(%rdi)
        incq %rsi
        incq %rdi
        decl %ecx
        jnz .L_1b_cache_copy_loop

        /* Finished copying; fence the prior stores */
.L_finish_copy:
        xorl %eax,%eax
        ASM_CLAC
        sfence
        ret
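
/*
 * The sfence above (and the one in .L_fixup_handle_tail below) is
 * needed because movnti stores are weakly ordered: the fence makes
 * all streaming stores globally visible before the copy is reported
 * complete, on both the success and the fault path.
 */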

        .section .fixup,"ax"
.L_fixup_4x8b_copy:
        shll $6,%ecx
        addl %ecx,%edx
        jmp .L_fixup_handle_tail
.L_fixup_8b_copy:
        lea (%rdx,%rcx,8),%rdx
        jmp .L_fixup_handle_tail
.L_fixup_4b_copy:
        lea (%rdx,%rcx,4),%rdx
        jmp .L_fixup_handle_tail
.L_fixup_1b_copy:
        movl %ecx,%edx
.L_fixup_handle_tail:
        sfence
        jmp copy_user_handle_tail
        .previous

        _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
        _ASM_EXTABLE(20b,.L_fixup_8b_copy)
        _ASM_EXTABLE(21b,.L_fixup_8b_copy)
        _ASM_EXTABLE(30b,.L_fixup_4b_copy)
        _ASM_EXTABLE(31b,.L_fixup_4b_copy)
        _ASM_EXTABLE(40b,.L_fixup_1b_copy)
        _ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
EXPORT_SYMBOL(__copy_user_nocache)