#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# June 2015
#
# ChaCha20 for ARMv8.
#
# Performance in cycles per byte out of large buffer.
#
#			IALU/gcc-4.9	3xNEON+1xIALU	6xNEON+2xIALU
#
# Apple A7		5.50/+49%	3.33		1.70
# Cortex-A53		8.40/+80%	4.72		4.72(*)
# Cortex-A57		8.06/+43%	4.90		4.43(**)
# Denver		4.50/+82%	2.63		2.67(*)
# X-Gene		9.50/+46%	8.82		8.89(*)
# Mongoose		8.00/+44%	3.64		3.25
# Kryo			8.17/+50%	4.83		4.65
#
# (*)	doubling the interleave factor is not expected to help all
#	processors, only those with higher NEON latency and a higher
#	instruction issue rate;
# (**)	the expected improvement was higher than the one measured;

$flavour=shift;
$output=shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

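# The perlasm written to OUT above is piped through arm-xlate.pl, which
# lowers it into the assembler dialect selected by $flavour (flavour names
# such as "linux64" or "ios64" are illustrative; see arm-xlate.pl itself).
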
sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}

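# For example, a call to the otherwise undefined &add_32("x5","x5","x6")
# lands in AUTOLOAD above and appends "\tadd.32\tx5,x5,x6\n" to $code;
# a numeric last argument picks up a "#" prefix, and the post-processing
# loop at the bottom of this file later rewrites the ".32" pseudo-suffix
# into a plain "add" on W registers.
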
my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));

my @x=map("x$_",(5..17,19..21));
my @d=map("x$_",(22..28,30));
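# x0-x4 carry the function arguments, @x[0..15] (x5-x17,x19-x21) hold the
# sixteen 32-bit words of the working state, and @d[0..7] (x22-x28,x30)
# hold the initial state packed two words per 64-bit register.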

sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

	(
	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	"&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	"&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	"&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	"&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	"&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	"&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],16)",
	"&ror_32	(@x[$d1],@x[$d1],16)",
	"&ror_32	(@x[$d2],@x[$d2],16)",
	"&ror_32	(@x[$d3],@x[$d3],16)",

	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	"&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	"&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	"&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	"&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	"&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	"&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],20)",
	"&ror_32	(@x[$b1],@x[$b1],20)",
	"&ror_32	(@x[$b2],@x[$b2],20)",
	"&ror_32	(@x[$b3],@x[$b3],20)",

	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	"&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	"&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	"&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	"&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	"&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	"&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],24)",
	"&ror_32	(@x[$d1],@x[$d1],24)",
	"&ror_32	(@x[$d2],@x[$d2],24)",
	"&ror_32	(@x[$d3],@x[$d3],24)",

	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	"&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	"&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	"&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	"&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	"&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	"&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],25)",
	"&ror_32	(@x[$b1],@x[$b1],25)",
	"&ror_32	(@x[$b2],@x[$b2],25)",
	"&ror_32	(@x[$b3],@x[$b3],25)"
	);
}

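# ROUND above emits four interleaved ChaCha20 quarter-rounds. For reference
# only -- nothing in this generator calls it -- the sketch below models a
# single quarter-round on plain 32-bit integers; the "ror" amounts
# 16/20/24/25 emitted by ROUND are 32-n for the left-rotates n = 16/12/8/7
# used here.
sub quarterround_ref {
my ($a,$b,$c,$d)=@_;
my $rotl = sub { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n)))&0xffffffff };
	$a=($a+$b)&0xffffffff;	$d=$rotl->($d^$a,16);
	$c=($c+$d)&0xffffffff;	$b=$rotl->($b^$c,12);
	$a=($a+$b)&0xffffffff;	$d=$rotl->($d^$a,8);
	$c=($c+$d)&0xffffffff;	$b=$rotl->($b^$c,7);
	($a,$b,$c,$d);
}
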
$code.=<<___;
#include <openssl/arm_arch.h>

.extern	OPENSSL_armcap_P

.section .rodata

.align	5
.Lsigma:
.quad	0x3320646e61707865,0x6b20657479622d32		// endian-neutral
.Lone:
.long	1,0,0,0
.asciz	"ChaCha20 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"

.text

.globl	ChaCha20_ctr32
.type	ChaCha20_ctr32,%function
.align	5
ChaCha20_ctr32:
	cbz	$len,.Labort
#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10
	adrp	@x[0],:pg_hi21_nc:OPENSSL_armcap_P
#else
	adrp	@x[0],:pg_hi21:OPENSSL_armcap_P
#endif
	cmp	$len,#192
	b.lo	.Lshort
	ldr	w17,[@x[0],:lo12:OPENSSL_armcap_P]
	tst	w17,#ARMV7_NEON
	b.ne	ChaCha20_neon

.Lshort:
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adrp	@x[0],:pg_hi21:.Lsigma
	add	@x[0],@x[0],:lo12:.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ldp	@d[6],@d[7],[$ctr]		// load counter
#ifdef	__ARMEB__
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif

.Loop_outer:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#64
.Loop:
	sub	$ctr,$ctr,#1
___
	foreach (&ROUND(0, 4, 8,12)) { eval; }
	foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	cbnz	$ctr,.Loop

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	b.lo	.Ltail

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	add	@d[6],@d[6],#1			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64

	b.hi	.Loop_outer

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
.Labort:
	ret

.align	4
.Ltail:
	add	$len,$len,#64
.Less_than_64:
	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	stp	@x[0],@x[2],[sp,#0]
	stp	@x[4],@x[6],[sp,#16]
	stp	@x[8],@x[10],[sp,#32]
	stp	@x[12],@x[14],[sp,#48]

.Loop_tail:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	ret
.size	ChaCha20_ctr32,.-ChaCha20_ctr32
___

{{{
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) =
    map("v$_.4s",(0..7,16..23));
my (@K)=map("v$_.4s",(24..30));
my $ONE="v31.4s";

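# NEON register assignment: $A0-$D0, $A1-$D1 and $A2-$D2 are three parallel
# copies of the 4x4 state matrix, one 128-bit row per register; @K[0..6]
# hold the input block rows (sigma, key and successive counter values) and
# $ONE holds the vector counter increment.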
sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;

	(
	"&add		('$a','$a','$b')",
	"&eor		('$d','$d','$a')",
	"&rev32_16	('$d','$d')",		# vrot ($d,16)

	"&add		('$c','$c','$d')",
	"&eor		('$t','$b','$c')",
	"&ushr		('$b','$t',20)",
	"&sli		('$b','$t',12)",

	"&add		('$a','$a','$b')",
	"&eor		('$t','$d','$a')",
	"&ushr		('$d','$t',24)",
	"&sli		('$d','$t',8)",

	"&add		('$c','$c','$d')",
	"&eor		('$t','$b','$c')",
	"&ushr		('$b','$t',25)",
	"&sli		('$b','$t',7)",

	"&ext		('$c','$c','$c',8)",
	"&ext		('$d','$d','$d',$odd?4:12)",
	"&ext		('$b','$b','$b',$odd?12:4)"
	);
}

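# NEONROUND is one quarter-round on whole rows. AArch64 NEON has no 32-bit
# vector rotate, so rotations are composed from ushr+sli (shift right, then
# shift-left-and-insert); the rotate by 16 is done more cheaply as rev32 on
# 16-bit lanes. The trailing ext instructions rotate the $b/$c/$d rows so
# that column rounds ($odd==0) and diagonal rounds ($odd==1) share the same
# instruction sequence.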
$code.=<<___;

.type	ChaCha20_neon,%function
.align	5
ChaCha20_neon:
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adrp	@x[0],:pg_hi21:.Lsigma
	add	@x[0],@x[0],:lo12:.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	cmp	$len,#512
	b.hs	.L512_or_more_neon

	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	ld1	{$ONE},[@x[0]]
#ifdef	__ARMEB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif
	add	@K[3],@K[3],$ONE		// += 1
	add	@K[4],@K[3],$ONE
	add	@K[5],@K[4],$ONE
	shl	$ONE,$ONE,#2			// 1 -> 4

.Loop_outer_neon:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	mov	$A0,@K[0]
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	mov	$A1,@K[0]
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	mov	$A2,@K[0]
	mov.32	@x[6],@d[3]
	mov	$B0,@K[1]
	lsr	@x[7],@d[3],#32
	mov	$B1,@K[1]
	mov.32	@x[8],@d[4]
	mov	$B2,@K[1]
	lsr	@x[9],@d[4],#32
	mov	$D0,@K[3]
	mov.32	@x[10],@d[5]
	mov	$D1,@K[4]
	lsr	@x[11],@d[5],#32
	mov	$D2,@K[5]
	mov.32	@x[12],@d[6]
	mov	$C0,@K[2]
	lsr	@x[13],@d[6],#32
	mov	$C1,@K[2]
	mov.32	@x[14],@d[7]
	mov	$C2,@K[2]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#256
.Loop_neon:
	sub	$ctr,$ctr,#1
___
	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	my @thread3=&ROUND(0,4,8,12);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&ROUND(0,5,10,15);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	$A0,$A0,@K[0]
	add	@x[1],@x[1],@d[0],lsr#32
	add	$A1,$A1,@K[0]
	add.32	@x[2],@x[2],@d[1]
	add	$A2,$A2,@K[0]
	add	@x[3],@x[3],@d[1],lsr#32
	add	$C0,$C0,@K[2]
	add.32	@x[4],@x[4],@d[2]
	add	$C1,$C1,@K[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add	$C2,$C2,@K[2]
	add.32	@x[6],@x[6],@d[3]
	add	$D0,$D0,@K[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	$D1,$D1,@K[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	$D2,$D2,@K[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	$B0,$B0,@K[1]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	$B1,$B1,@K[1]
	add	@x[15],@x[15],@d[7],lsr#32
	add	$B2,$B2,@K[1]

	b.lo	.Ltail_neon

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$T0-$T3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	$A0,$A0,$T0
	eor	@x[10],@x[10],@x[11]
	eor	$B0,$B0,$T1
	eor	@x[12],@x[12],@x[13]
	eor	$C0,$C0,$T2
	eor	@x[14],@x[14],@x[15]
	eor	$D0,$D0,$T3
	ld1.8	{$T0-$T3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	add	@d[6],@d[6],#4			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	add	@K[3],@K[3],$ONE		// += 4
	stp	@x[8],@x[10],[$out,#32]
	add	@K[4],@K[4],$ONE
	stp	@x[12],@x[14],[$out,#48]
	add	@K[5],@K[5],$ONE
	add	$out,$out,#64

	st1.8	{$A0-$D0},[$out],#64
	ld1.8	{$A0-$D0},[$inp],#64

	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64

	eor	$A2,$A2,$A0
	eor	$B2,$B2,$B0
	eor	$C2,$C2,$C0
	eor	$D2,$D2,$D0
	st1.8	{$A2-$D2},[$out],#64

	b.hi	.Loop_outer_neon

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	ret

.Ltail_neon:
	add	$len,$len,#256
	cmp	$len,#64
	b.lo	.Less_than_64

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	add	@d[6],@d[6],#4			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Less_than_128

	ld1.8	{$T0-$T3},[$inp],#64
	eor	$A0,$A0,$T0
	eor	$B0,$B0,$T1
	eor	$C0,$C0,$T2
	eor	$D0,$D0,$T3
	st1.8	{$A0-$D0},[$out],#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Less_than_192

	ld1.8	{$T0-$T3},[$inp],#64
	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64

	st1.8	{$A2-$D2},[sp]
	b	.Last_neon

.Less_than_128:
	st1.8	{$A0-$D0},[sp]
	b	.Last_neon
.Less_than_192:
	st1.8	{$A1-$D1},[sp]
	b	.Last_neon

.align	4
.Last_neon:
	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

.Loop_tail_neon:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail_neon

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

.Ldone_neon:
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	ret
.size	ChaCha20_neon,.-ChaCha20_neon
___
{
my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
    $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23));

$code.=<<___;
.type	ChaCha20_512_neon,%function
.align	5
ChaCha20_512_neon:
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adrp	@x[0],:pg_hi21:.Lsigma
	add	@x[0],@x[0],:lo12:.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]

.L512_or_more_neon:
	sub	sp,sp,#128+64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	ld1	{$ONE},[@x[0]]
#ifdef	__ARMEB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif
	add	@K[3],@K[3],$ONE		// += 1
	stp	@K[0],@K[1],[sp,#0]		// off-load key block, invariant part
	add	@K[3],@K[3],$ONE		// not typo
	str	@K[2],[sp,#32]
	add	@K[4],@K[3],$ONE
	add	@K[5],@K[4],$ONE
	add	@K[6],@K[5],$ONE
	shl	$ONE,$ONE,#2			// 1 -> 4

	stp	d8,d9,[sp,#128+0]		// meet ABI requirements
	stp	d10,d11,[sp,#128+16]
	stp	d12,d13,[sp,#128+32]
	stp	d14,d15,[sp,#128+48]

	sub	$len,$len,#512			// not typo

.Loop_outer_512_neon:
	mov	$A0,@K[0]
	mov	$A1,@K[0]
	mov	$A2,@K[0]
	mov	$A3,@K[0]
	mov	$A4,@K[0]
	mov	$A5,@K[0]
	mov	$B0,@K[1]
	mov.32	@x[0],@d[0]			// unpack key block
	mov	$B1,@K[1]
	lsr	@x[1],@d[0],#32
	mov	$B2,@K[1]
	mov.32	@x[2],@d[1]
	mov	$B3,@K[1]
	lsr	@x[3],@d[1],#32
	mov	$B4,@K[1]
	mov.32	@x[4],@d[2]
	mov	$B5,@K[1]
	lsr	@x[5],@d[2],#32
	mov	$D0,@K[3]
	mov.32	@x[6],@d[3]
	mov	$D1,@K[4]
	lsr	@x[7],@d[3],#32
	mov	$D2,@K[5]
	mov.32	@x[8],@d[4]
	mov	$D3,@K[6]
	lsr	@x[9],@d[4],#32
	mov	$C0,@K[2]
	mov.32	@x[10],@d[5]
	mov	$C1,@K[2]
	lsr	@x[11],@d[5],#32
	add	$D4,$D0,$ONE			// +4
	mov.32	@x[12],@d[6]
	add	$D5,$D1,$ONE			// +4
	lsr	@x[13],@d[6],#32
	mov	$C2,@K[2]
	mov.32	@x[14],@d[7]
	mov	$C3,@K[2]
	lsr	@x[15],@d[7],#32
	mov	$C4,@K[2]
	stp	@K[3],@K[4],[sp,#48]		// off-load key block, variable part
	mov	$C5,@K[2]
	str	@K[5],[sp,#80]

	mov	$ctr,#5
	subs	$len,$len,#512
.Loop_upper_neon:
	sub	$ctr,$ctr,#1
___
	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
	my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
	my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
	my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
	my $diff = ($#thread0+1)*6 - $#thread67 - 1;
	my $i = 0;

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_upper_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	add	@d[6],@d[6],#1			// increment counter
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	stp	@x[4],@x[6],[$out,#16]
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	stp	@x[8],@x[10],[$out,#32]
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#5
.Loop_lower_neon:
	sub	$ctr,$ctr,#1
___
	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_lower_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	ldp	@K[0],@K[1],[sp,#0]
	add	@x[1],@x[1],@d[0],lsr#32
	ldp	@K[2],@K[3],[sp,#32]
	add.32	@x[2],@x[2],@d[1]
	ldp	@K[4],@K[5],[sp,#64]
	add	@x[3],@x[3],@d[1],lsr#32
	add	$A0,$A0,@K[0]
	add.32	@x[4],@x[4],@d[2]
	add	$A1,$A1,@K[0]
	add	@x[5],@x[5],@d[2],lsr#32
	add	$A2,$A2,@K[0]
	add.32	@x[6],@x[6],@d[3]
	add	$A3,$A3,@K[0]
	add	@x[7],@x[7],@d[3],lsr#32
	add	$A4,$A4,@K[0]
	add.32	@x[8],@x[8],@d[4]
	add	$A5,$A5,@K[0]
	add	@x[9],@x[9],@d[4],lsr#32
	add	$C0,$C0,@K[2]
	add.32	@x[10],@x[10],@d[5]
	add	$C1,$C1,@K[2]
	add	@x[11],@x[11],@d[5],lsr#32
	add	$C2,$C2,@K[2]
	add.32	@x[12],@x[12],@d[6]
	add	$C3,$C3,@K[2]
	add	@x[13],@x[13],@d[6],lsr#32
	add	$C4,$C4,@K[2]
	add.32	@x[14],@x[14],@d[7]
	add	$C5,$C5,@K[2]
	add	@x[15],@x[15],@d[7],lsr#32
	add	$D4,$D4,$ONE			// +4
	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	$D5,$D5,$ONE			// +4
	add	@x[2],@x[2],@x[3],lsl#32
	add	$D0,$D0,@K[3]
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	$D1,$D1,@K[4]
	add	@x[4],@x[4],@x[5],lsl#32
	add	$D2,$D2,@K[5]
	add	@x[6],@x[6],@x[7],lsl#32
	add	$D3,$D3,@K[6]
	ldp	@x[5],@x[7],[$inp,#16]
	add	$D4,$D4,@K[3]
	add	@x[8],@x[8],@x[9],lsl#32
	add	$D5,$D5,@K[4]
	add	@x[10],@x[10],@x[11],lsl#32
	add	$B0,$B0,@K[1]
	ldp	@x[9],@x[11],[$inp,#32]
	add	$B1,$B1,@K[1]
	add	@x[12],@x[12],@x[13],lsl#32
	add	$B2,$B2,@K[1]
	add	@x[14],@x[14],@x[15],lsl#32
	add	$B3,$B3,@K[1]
	ldp	@x[13],@x[15],[$inp,#48]
	add	$B4,$B4,@K[1]
	add	$inp,$inp,#64
	add	$B5,$B5,@K[1]

#ifdef	__ARMEB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$T0-$T3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	$A0,$A0,$T0
	eor	@x[10],@x[10],@x[11]
	eor	$B0,$B0,$T1
	eor	@x[12],@x[12],@x[13]
	eor	$C0,$C0,$T2
	eor	@x[14],@x[14],@x[15]
	eor	$D0,$D0,$T3
	ld1.8	{$T0-$T3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	add	@d[6],@d[6],#7			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	st1.8	{$A0-$D0},[$out],#64

	ld1.8	{$A0-$D0},[$inp],#64
	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64

	ld1.8	{$A1-$D1},[$inp],#64
	eor	$A2,$A2,$A0
	ldp	@K[0],@K[1],[sp,#0]
	eor	$B2,$B2,$B0
	ldp	@K[2],@K[3],[sp,#32]
	eor	$C2,$C2,$C0
	eor	$D2,$D2,$D0
	st1.8	{$A2-$D2},[$out],#64

	ld1.8	{$A2-$D2},[$inp],#64
	eor	$A3,$A3,$A1
	eor	$B3,$B3,$B1
	eor	$C3,$C3,$C1
	eor	$D3,$D3,$D1
	st1.8	{$A3-$D3},[$out],#64

	ld1.8	{$A3-$D3},[$inp],#64
	eor	$A4,$A4,$A2
	eor	$B4,$B4,$B2
	eor	$C4,$C4,$C2
	eor	$D4,$D4,$D2
	st1.8	{$A4-$D4},[$out],#64

	shl	$A0,$ONE,#1			// 4 -> 8
	eor	$A5,$A5,$A3
	eor	$B5,$B5,$B3
	eor	$C5,$C5,$C3
	eor	$D5,$D5,$D3
	st1.8	{$A5-$D5},[$out],#64

	add	@K[3],@K[3],$A0			// += 8
	add	@K[4],@K[4],$A0
	add	@K[5],@K[5],$A0
	add	@K[6],@K[6],$A0

	b.hs	.Loop_outer_512_neon

	adds	$len,$len,#512
	ushr	$A0,$ONE,#2			// 4 -> 1

	ldp	d8,d9,[sp,#128+0]		// meet ABI requirements
	ldp	d10,d11,[sp,#128+16]
	ldp	d12,d13,[sp,#128+32]
	ldp	d14,d15,[sp,#128+48]

	stp	@K[0],$ONE,[sp,#0]		// wipe off-load area
	stp	@K[0],$ONE,[sp,#32]
	stp	@K[0],$ONE,[sp,#64]

	b.eq	.Ldone_512_neon

	cmp	$len,#192
	sub	@K[3],@K[3],$A0			// -= 1
	sub	@K[4],@K[4],$A0
	sub	@K[5],@K[5],$A0
	add	sp,sp,#128
	b.hs	.Loop_outer_neon

	eor	@K[1],@K[1],@K[1]
	eor	@K[2],@K[2],@K[2]
	eor	@K[3],@K[3],@K[3]
	eor	@K[4],@K[4],@K[4]
	eor	@K[5],@K[5],@K[5]
	eor	@K[6],@K[6],@K[6]
	b	.Loop_outer

.Ldone_512_neon:
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#128+64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	ret
.size	ChaCha20_512_neon,.-ChaCha20_512_neon
___
}
}}}

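# The loop below lowers the pseudo-mnemonics used above into real AArch64
# syntax, e.g. (illustrative):
#
#	add.32	x5,x5,x22	->	add	w5,w5,w22
#	ld1.8	{v0.4s},[x1]	->	ld1	{v0.16b},[x1]
#	ldp	v24.4s,v25.4s,[sp,#0]	->	ldp	q24,q25,[sp,#0]
#	rev32.16 v1.4s,v1.4s	->	rev32	v1.8h,v1.8h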
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	(s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1)) or
	(m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1)) or
	(s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1)) or
	(m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1)) or
	(s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));

	#s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

	print $_,"\n";
}
close STDOUT or die "error closing STDOUT"; # flush