/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>

#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */

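/*
 * Seen from C, and assuming the standard System V AMD64 calling
 * convention, this corresponds to (illustrative sketch, not a
 * definition taken from this file):
 *
 *	void *memcpy(void *dest, const void *src, size_t count);
 *
 * dest arrives in %rdi, src in %rsi, count in %rdx, and the original
 * dest pointer is returned in %rax.
 */
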
/*
 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
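/*
 * A rough C-like sketch of the replacement below (illustrative only,
 * with dest/src/count in %rdi/%rsi/%rdx as described above):
 *
 *	rax = dest;
 *	copy count >> 3 qwords with REP MOVSQ;
 *	copy the remaining count & 7 bytes with REP MOVSB;
 *	return rax;
 */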
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c:
	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
.Lmemcpy_e:
	.previous

/*
 * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than
 * memcpy_c. Use memcpy_c_e when possible.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
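/*
 * With ERMS, a single REP MOVSB handles any length efficiently, so the
 * sketch collapses to (illustrative only):
 *
 *	rax = dest;
 *	copy count bytes with REP MOVSB;
 *	return rax;
 */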
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c_e:
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
.Lmemcpy_e_e:
	.previous

ENTRY(__memcpy)
ENTRY(memcpy)
	CFI_STARTPROC
	movq %rdi, %rax

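	/*
	 * Copies shorter than 32 bytes go straight to the tail handler;
	 * the bulk copy loops below assume at least one 32-byte block.
	 */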
	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * We check whether memory false dependence could occur,
	 * then jump to the corresponding copy mode: if the low byte of
	 * the source is below that of the destination, copy backward to
	 * reduce the chance of loads being falsely seen as dependent on
	 * earlier stores to nearby (aliasing) addresses.
	 */
	cmp %dil, %sil
	jl .Lcopy_backward
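	/*
	 * Forward copy. The count is pre-biased down by 0x20 so that the
	 * "subq $0x20" at the top of the loop both decrements the
	 * remaining count and, via the carry flag, tells the "jae" at the
	 * bottom whether another full 32-byte block is still available.
	 */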
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations execute in one cycle,
	 * so append NOPs in the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
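	/*
	 * At most 31 bytes remain. Each case below copies a head chunk
	 * and a tail chunk whose ranges may overlap in the middle; that
	 * is harmless because all loads are issued before any store.
	 */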
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes. The subl above already set
	 * the flags: ZF means exactly one byte is left (mov does not
	 * clobber flags), so the two extra byte moves can be skipped.
	 */
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
	CFI_ENDPROC
ENDPROC(memcpy)
ENDPROC(__memcpy)

	/*
	 * Some CPUs support the enhanced REP MOVSB/STOSB (ERMS) feature.
	 * If the feature is supported, memcpy_c_e() is the first choice.
	 * If enhanced rep movsb copy is not available, use the fast string
	 * copy memcpy_c() when possible: it is faster and the code is
	 * simpler than the original memcpy().
	 * Otherwise, the original memcpy() is used.
	 * In the .altinstructions section, the ERMS feature is placed after
	 * the REP_GOOD feature to get the right patch order.
	 *
	 * Replace only the beginning: memcpy itself is used to apply the
	 * alternatives, so it is silly to overwrite itself with NOPs -
	 * a reboot would be the only outcome...
	 */
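	/*
	 * Each altinstruction_entry below names the patch site, the
	 * replacement, the CPU feature bit, and two lengths; both lengths
	 * are given as the replacement's size, which is what limits the
	 * patch to the beginning of memcpy as described above.
	 */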
	.section .altinstructions, "a"
	altinstruction_entry memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
			     .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
	altinstruction_entry memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
			     .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
	.previous