/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>

#include <asm/cpufeature.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 *  rax original destination
 */
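/*
 * C-level view - the System V AMD64 ABI maps the three arguments to
 * the registers listed above:
 *
 *	void *memcpy(void *dest, const void *src, size_t n);
 */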

/*
 * memcpy_c() - fast string ops (REP MOVSQ) based variant.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
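/*
 * Example: for a 20-byte copy, "rep movsq" moves two qwords
 * (20 >> 3 == 2) and "rep movsb" moves the remaining four bytes
 * (20 & 7 == 4).
 */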
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c:
	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
.Lmemcpy_e:
	.previous

/*
 * memcpy_c_e() - enhanced fast string memcpy. This is faster and simpler than
 * memcpy_c. Use memcpy_c_e when possible.
 *
 * This gets patched over the unrolled variant (below) via the
 * alternative instructions framework:
 */
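/*
 * With ERMS ("enhanced REP MOVSB/STOSB"), a single "rep movsb" taking
 * the full byte count in %rcx copies any size efficiently, so no
 * qword/byte split is needed.
 */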
	.section .altinstr_replacement, "ax", @progbits
.Lmemcpy_c_e:
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
.Lmemcpy_e_e:
	.previous

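/* Weak: an instrumented implementation (e.g. KASAN's) may override it. */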
.weak memcpy

ENTRY(__memcpy)
ENTRY(memcpy)
	CFI_STARTPROC
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * Check whether a memory false dependence could occur (source
	 * and destination lying close together), then jump to the
	 * matching copy direction. Comparing only the low address
	 * bytes below is a cheap heuristic for this.
	 */
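	/*
	 * Example: a source whose low address byte is 0x04 and a
	 * destination whose low byte is 0x20 take the jl (signed byte
	 * compare, 0x04 < 0x20), so that copy runs backward.
	 */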
	cmp %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
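	/*
	 * %rdx is now biased by -0x20: each iteration's subq below
	 * borrows (sets CF) exactly when the block about to be copied
	 * is the last full 32-byte one, so "jae" exits after copying
	 * it. The addl past the loop removes the bias again, leaving
	 * the 0..31 tail byte count in %edx.
	 */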
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate the copy position at the tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations issue in one cycle, so append the
	 * alignment NOPs within the same 16-byte chunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate the copy position back at the head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
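	/*
	 * The first and last qword pairs overlap in the middle for
	 * counts below 32: e.g. for 20 bytes the first pair covers
	 * bytes 0..15 and the second pair bytes 4..19.
	 */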
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 byte to 3 bytes. Note that movzbl does not
	 * touch the flags, so the jz below still tests the subl above:
	 * ZF set means the count was exactly one byte.
	 */
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
	CFI_ENDPROC
ENDPROC(memcpy)
ENDPROC(__memcpy)

/*
 * Some CPUs provide the enhanced REP MOVSB/STOSB (ERMS) feature.
 * If the feature is supported, memcpy_c_e() is the first choice.
 * If enhanced rep movsb copy is not available, use the fast string
 * copy memcpy_c() when possible: it is faster and its code is simpler
 * than the original memcpy().
 * Otherwise, the original memcpy() is used.
 * In the .altinstructions section, the ERMS feature is placed after the
 * REP_GOOD feature to get the right patch order.
 *
 * Replace only the beginning: memcpy is itself used while applying
 * alternatives, so it would be silly for it to overwrite itself with
 * nops - a reboot would be the only outcome...
 */
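/*
 * Each altinstruction_entry below gives: the code to patch (__memcpy),
 * the replacement, the gating CPU feature bit, and the patched and
 * replacement lengths - both the replacement's size here, matching the
 * "replace only the beginning" note above.
 */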
	.section .altinstructions, "a"
	altinstruction_entry __memcpy,.Lmemcpy_c,X86_FEATURE_REP_GOOD,\
			     .Lmemcpy_e-.Lmemcpy_c,.Lmemcpy_e-.Lmemcpy_c
	altinstruction_entry __memcpy,.Lmemcpy_c_e,X86_FEATURE_ERMS, \
			     .Lmemcpy_e_e-.Lmemcpy_c_e,.Lmemcpy_e_e-.Lmemcpy_c_e
	.previous