1#! /usr/bin/env perl
2# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15#
16# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
17# of Linaro. Permission to use under GPL terms is granted.
18# ====================================================================
19
20# Bit-sliced AES for ARM NEON
21#
22# February 2012.
23#
24# This implementation is a direct adaptation of the bsaes-x86_64 module for
25# ARM NEON, except that this module is endian-neutral [in the sense that
26# it can be compiled for either endianness] courtesy of vld1.8's
27# neutrality. The initial version doesn't implement an interface to OpenSSL,
28# only low-level primitives and unsupported entry points, just enough
29# to collect performance results, which for the Cortex-A8 core are:
30#
31# encrypt 19.5 cycles per byte processed with 128-bit key
32# decrypt 22.1 cycles per byte processed with 128-bit key
33# key conv. 440 cycles per 128-bit key/0.18 of 8x block
34#
35# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts it in 19.7,
36# which is [much] worse than anticipated (for further details see
37# http://www.openssl.org/~appro/Snapdragon-S4.html).
38#
39# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
40# manages in 20.0 cycles].
41#
42# When comparing to x86_64 results keep in mind that the NEON unit is
43# [mostly] single-issue and thus can't [fully] benefit from
44# instruction-level parallelism. And when comparing to aes-armv4
45# results keep in mind the key schedule conversion overhead (see
46# bsaes-x86_64.pl for further details)...
47#
48# <appro@openssl.org>
49
50# April-August 2013
51# Add CBC, CTR and XTS subroutines and adapt for kernel use; courtesy of Ard.
52
53$flavour = shift;
54if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
55else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
56
57if ($flavour && $flavour ne "void") {
58 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
59 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
60	( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
61	die "can't locate arm-xlate.pl";
62
63 open STDOUT,"| \"$^X\" $xlate $flavour $output";
64} else {
65 open STDOUT,">$output";
66}
67
68my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
69my @XMM=map("q$_",(0..15));
70
71{
72my ($key,$rounds,$const)=("r4","r5","r6");
73
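# Dlo/Dhi map a NEON quad register "qN" onto its low and high 64-bit halves,
# "d(2*N)" and "d(2*N+1)", for instructions such as vtbl.8 that only accept
# D registers.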
74sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
75sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
76
77sub Sbox {
78# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
79# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
80my @b=@_[0..7];
81my @t=@_[8..11];
82my @s=@_[12..15];
83 &InBasisChange (@b);
84 &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
85 &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
86}
87
88sub InBasisChange {
89# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
90# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
91my @b=@_[0..7];
92$code.=<<___;
93 veor @b[2], @b[2], @b[1]
94 veor @b[5], @b[5], @b[6]
95 veor @b[3], @b[3], @b[0]
96 veor @b[6], @b[6], @b[2]
97 veor @b[5], @b[5], @b[0]
98
99 veor @b[6], @b[6], @b[3]
100 veor @b[3], @b[3], @b[7]
101 veor @b[7], @b[7], @b[5]
102 veor @b[3], @b[3], @b[4]
103 veor @b[4], @b[4], @b[5]
104
105 veor @b[2], @b[2], @b[7]
106 veor @b[3], @b[3], @b[1]
107 veor @b[1], @b[1], @b[5]
108___
109}
110
111sub OutBasisChange {
112# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
113# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
114my @b=@_[0..7];
115$code.=<<___;
116 veor @b[0], @b[0], @b[6]
117 veor @b[1], @b[1], @b[4]
118 veor @b[4], @b[4], @b[6]
119 veor @b[2], @b[2], @b[0]
120 veor @b[6], @b[6], @b[1]
121
122 veor @b[1], @b[1], @b[5]
123 veor @b[5], @b[5], @b[3]
124 veor @b[3], @b[3], @b[7]
125 veor @b[7], @b[7], @b[5]
126 veor @b[2], @b[2], @b[5]
127
128 veor @b[4], @b[4], @b[7]
129___
130}
131
132sub InvSbox {
133# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
134# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
135my @b=@_[0..7];
136my @t=@_[8..11];
137my @s=@_[12..15];
138 &InvInBasisChange (@b);
139 &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
140 &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
141}
142
143sub InvInBasisChange { # OutBasisChange in reverse (with twist)
144my @b=@_[5,1,2,6,3,7,0,4];
145$code.=<<___
146 veor @b[1], @b[1], @b[7]
147 veor @b[4], @b[4], @b[7]
148
149 veor @b[7], @b[7], @b[5]
150 veor @b[1], @b[1], @b[3]
151 veor @b[2], @b[2], @b[5]
152 veor @b[3], @b[3], @b[7]
153
154 veor @b[6], @b[6], @b[1]
155 veor @b[2], @b[2], @b[0]
156 veor @b[5], @b[5], @b[3]
157 veor @b[4], @b[4], @b[6]
158 veor @b[0], @b[0], @b[6]
159 veor @b[1], @b[1], @b[4]
160___
161}
162
163sub InvOutBasisChange { # InBasisChange in reverse
164my @b=@_[2,5,7,3,6,1,0,4];
165$code.=<<___;
166 veor @b[1], @b[1], @b[5]
167 veor @b[2], @b[2], @b[7]
168
169 veor @b[3], @b[3], @b[1]
170 veor @b[4], @b[4], @b[5]
171 veor @b[7], @b[7], @b[5]
172 veor @b[3], @b[3], @b[4]
173 veor @b[5], @b[5], @b[0]
174 veor @b[3], @b[3], @b[7]
175 veor @b[6], @b[6], @b[2]
176 veor @b[2], @b[2], @b[1]
177 veor @b[6], @b[6], @b[3]
178
179 veor @b[3], @b[3], @b[0]
180 veor @b[5], @b[5], @b[6]
181___
182}
183
184sub Mul_GF4 {
185#;*************************************************************
186#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
187#;*************************************************************
188my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
189$code.=<<___;
190 veor $t0, $y0, $y1
191 vand $t0, $t0, $x0
192 veor $x0, $x0, $x1
193 vand $t1, $x1, $y0
194 vand $x0, $x0, $y1
195 veor $x1, $t1, $t0
196 veor $x0, $x0, $t1
197___
198}
199
200sub Mul_GF4_N { # not used, see next subroutine
201# multiply and scale by N
202my ($x0,$x1,$y0,$y1,$t0)=@_;
203$code.=<<___;
204 veor $t0, $y0, $y1
205 vand $t0, $t0, $x0
206 veor $x0, $x0, $x1
207 vand $x1, $x1, $y0
208 vand $x0, $x0, $y1
209 veor $x1, $x1, $x0
210 veor $x0, $x0, $t0
211___
212}
213
214sub Mul_GF4_N_GF4 {
215# interleaved Mul_GF4_N and Mul_GF4
216my ($x0,$x1,$y0,$y1,$t0,
217 $x2,$x3,$y2,$y3,$t1)=@_;
218$code.=<<___;
219 veor $t0, $y0, $y1
220 veor $t1, $y2, $y3
221 vand $t0, $t0, $x0
222 vand $t1, $t1, $x2
223 veor $x0, $x0, $x1
224 veor $x2, $x2, $x3
225 vand $x1, $x1, $y0
226 vand $x3, $x3, $y2
227 vand $x0, $x0, $y1
228 vand $x2, $x2, $y3
229 veor $x1, $x1, $x0
230 veor $x2, $x2, $x3
231 veor $x0, $x0, $t0
232 veor $x3, $x3, $t1
233___
234}
235sub Mul_GF16_2 {
236my @x=@_[0..7];
237my @y=@_[8..11];
238my @t=@_[12..15];
239$code.=<<___;
240 veor @t[0], @x[0], @x[2]
241 veor @t[1], @x[1], @x[3]
242___
243 &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
244$code.=<<___;
245 veor @y[0], @y[0], @y[2]
246 veor @y[1], @y[1], @y[3]
247___
248 Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
249 @x[2], @x[3], @y[2], @y[3], @t[2]);
250$code.=<<___;
251 veor @x[0], @x[0], @t[0]
252 veor @x[2], @x[2], @t[0]
253 veor @x[1], @x[1], @t[1]
254 veor @x[3], @x[3], @t[1]
255
256 veor @t[0], @x[4], @x[6]
257 veor @t[1], @x[5], @x[7]
258___
259 &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
260 @x[6], @x[7], @y[2], @y[3], @t[2]);
261$code.=<<___;
262 veor @y[0], @y[0], @y[2]
263 veor @y[1], @y[1], @y[3]
264___
265 &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
266$code.=<<___;
267 veor @x[4], @x[4], @t[0]
268 veor @x[6], @x[6], @t[0]
269 veor @x[5], @x[5], @t[1]
270 veor @x[7], @x[7], @t[1]
271___
272}
273sub Inv_GF256 {
274#;********************************************************************
275#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
276#;********************************************************************
277my @x=@_[0..7];
278my @t=@_[8..11];
279my @s=@_[12..15];
280# direct optimizations from hardware
281$code.=<<___;
282 veor @t[3], @x[4], @x[6]
283 veor @t[2], @x[5], @x[7]
284 veor @t[1], @x[1], @x[3]
285 veor @s[1], @x[7], @x[6]
286 vmov @t[0], @t[2]
287 veor @s[0], @x[0], @x[2]
288
289 vorr @t[2], @t[2], @t[1]
290 veor @s[3], @t[3], @t[0]
291 vand @s[2], @t[3], @s[0]
292 vorr @t[3], @t[3], @s[0]
293 veor @s[0], @s[0], @t[1]
294 vand @t[0], @t[0], @t[1]
295 veor @t[1], @x[3], @x[2]
296 vand @s[3], @s[3], @s[0]
297 vand @s[1], @s[1], @t[1]
298 veor @t[1], @x[4], @x[5]
299 veor @s[0], @x[1], @x[0]
300 veor @t[3], @t[3], @s[1]
301 veor @t[2], @t[2], @s[1]
302 vand @s[1], @t[1], @s[0]
303 vorr @t[1], @t[1], @s[0]
304 veor @t[3], @t[3], @s[3]
305 veor @t[0], @t[0], @s[1]
306 veor @t[2], @t[2], @s[2]
307 veor @t[1], @t[1], @s[3]
308 veor @t[0], @t[0], @s[2]
309 vand @s[0], @x[7], @x[3]
310 veor @t[1], @t[1], @s[2]
311 vand @s[1], @x[6], @x[2]
312 vand @s[2], @x[5], @x[1]
313 vorr @s[3], @x[4], @x[0]
314 veor @t[3], @t[3], @s[0]
315 veor @t[1], @t[1], @s[2]
316 veor @t[0], @t[0], @s[3]
317 veor @t[2], @t[2], @s[1]
318
319 @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
320
321 @ new smaller inversion
322
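	@ vbsl Qd,Qn,Qm computes (Qd & Qn) | (~Qd & Qm), i.e. a bitwise
	@ multiplexer keyed by the original contents of the destination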
323 vand @s[2], @t[3], @t[1]
324 vmov @s[0], @t[0]
325
326 veor @s[1], @t[2], @s[2]
327 veor @s[3], @t[0], @s[2]
328 veor @s[2], @t[0], @s[2] @ @s[2]=@s[3]
329
330 vbsl @s[1], @t[1], @t[0]
331 vbsl @s[3], @t[3], @t[2]
332 veor @t[3], @t[3], @t[2]
333
334 vbsl @s[0], @s[1], @s[2]
335 vbsl @t[0], @s[2], @s[1]
336
337 vand @s[2], @s[0], @s[3]
338 veor @t[1], @t[1], @t[0]
339
340 veor @s[2], @s[2], @t[3]
341___
342# output in s3, s2, s1, t1
343
344# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
345
346# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
347 &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
348
349### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
350}
351
352# AES linear components
353
354sub ShiftRows {
355my @x=@_[0..7];
356my @t=@_[8..11];
357my $mask=pop;
358$code.=<<___;
359 vldmia $key!, {@t[0]-@t[3]}
360 veor @t[0], @t[0], @x[0]
361 veor @t[1], @t[1], @x[1]
362 vtbl.8 `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
363 vtbl.8 `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
364 vldmia $key!, {@t[0]}
365 veor @t[2], @t[2], @x[2]
366 vtbl.8 `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
367 vtbl.8 `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
368 vldmia $key!, {@t[1]}
369 veor @t[3], @t[3], @x[3]
370 vtbl.8 `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
371 vtbl.8 `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
372 vldmia $key!, {@t[2]}
373 vtbl.8 `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
374 vtbl.8 `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
375 vldmia $key!, {@t[3]}
376 veor @t[0], @t[0], @x[4]
377 veor @t[1], @t[1], @x[5]
378 vtbl.8 `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
379 vtbl.8 `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
380 veor @t[2], @t[2], @x[6]
381 vtbl.8 `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
382 vtbl.8 `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
383 veor @t[3], @t[3], @x[7]
384 vtbl.8 `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
385 vtbl.8 `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
386 vtbl.8 `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
387 vtbl.8 `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
388___
389}
390
391sub MixColumns {
392# modified to emit output in order suitable for feeding back to aesenc[last]
393my @x=@_[0..7];
394my @t=@_[8..15];
395my $inv=@_[16]; # optional
396$code.=<<___;
397 vext.8 @t[0], @x[0], @x[0], #12 @ x0 <<< 32
398 vext.8 @t[1], @x[1], @x[1], #12
399 veor @x[0], @x[0], @t[0] @ x0 ^ (x0 <<< 32)
400 vext.8 @t[2], @x[2], @x[2], #12
401 veor @x[1], @x[1], @t[1]
402 vext.8 @t[3], @x[3], @x[3], #12
403 veor @x[2], @x[2], @t[2]
404 vext.8 @t[4], @x[4], @x[4], #12
405 veor @x[3], @x[3], @t[3]
406 vext.8 @t[5], @x[5], @x[5], #12
407 veor @x[4], @x[4], @t[4]
408 vext.8 @t[6], @x[6], @x[6], #12
409 veor @x[5], @x[5], @t[5]
410 vext.8 @t[7], @x[7], @x[7], #12
411 veor @x[6], @x[6], @t[6]
412
413 veor @t[1], @t[1], @x[0]
414 veor @x[7], @x[7], @t[7]
415 vext.8 @x[0], @x[0], @x[0], #8 @ (x0 ^ (x0 <<< 32)) <<< 64)
416 veor @t[2], @t[2], @x[1]
417 veor @t[0], @t[0], @x[7]
418 veor @t[1], @t[1], @x[7]
419 vext.8 @x[1], @x[1], @x[1], #8
420 veor @t[5], @t[5], @x[4]
421 veor @x[0], @x[0], @t[0]
422 veor @t[6], @t[6], @x[5]
423 veor @x[1], @x[1], @t[1]
424 vext.8 @t[0], @x[4], @x[4], #8
425 veor @t[4], @t[4], @x[3]
426 vext.8 @t[1], @x[5], @x[5], #8
427 veor @t[7], @t[7], @x[6]
428 vext.8 @x[4], @x[3], @x[3], #8
429 veor @t[3], @t[3], @x[2]
430 vext.8 @x[5], @x[7], @x[7], #8
431 veor @t[4], @t[4], @x[7]
432 vext.8 @x[3], @x[6], @x[6], #8
433 veor @t[3], @t[3], @x[7]
434 vext.8 @x[6], @x[2], @x[2], #8
435 veor @x[7], @t[1], @t[5]
436___
437$code.=<<___ if (!$inv);
438 veor @x[2], @t[0], @t[4]
439 veor @x[4], @x[4], @t[3]
440 veor @x[5], @x[5], @t[7]
441 veor @x[3], @x[3], @t[6]
442 @ vmov @x[2], @t[0]
443 veor @x[6], @x[6], @t[2]
444 @ vmov @x[7], @t[1]
445___
446$code.=<<___ if ($inv);
447 veor @t[3], @t[3], @x[4]
448 veor @x[5], @x[5], @t[7]
449 veor @x[2], @x[3], @t[6]
450 veor @x[3], @t[0], @t[4]
451 veor @x[4], @x[6], @t[2]
452 vmov @x[6], @t[3]
453 @ vmov @x[7], @t[1]
454___
455}
456
457sub InvMixColumns_orig {
458my @x=@_[0..7];
459my @t=@_[8..15];
460
461$code.=<<___;
462 @ multiplication by 0x0e
463 vext.8 @t[7], @x[7], @x[7], #12
464 vmov @t[2], @x[2]
465 veor @x[2], @x[2], @x[5] @ 2 5
466 veor @x[7], @x[7], @x[5] @ 7 5
467 vext.8 @t[0], @x[0], @x[0], #12
468 vmov @t[5], @x[5]
469 veor @x[5], @x[5], @x[0] @ 5 0 [1]
470 veor @x[0], @x[0], @x[1] @ 0 1
471 vext.8 @t[1], @x[1], @x[1], #12
472 veor @x[1], @x[1], @x[2] @ 1 25
473 veor @x[0], @x[0], @x[6] @ 01 6 [2]
474 vext.8 @t[3], @x[3], @x[3], #12
475 veor @x[1], @x[1], @x[3] @ 125 3 [4]
476 veor @x[2], @x[2], @x[0] @ 25 016 [3]
477 veor @x[3], @x[3], @x[7] @ 3 75
478 veor @x[7], @x[7], @x[6] @ 75 6 [0]
479 vext.8 @t[6], @x[6], @x[6], #12
480 vmov @t[4], @x[4]
481 veor @x[6], @x[6], @x[4] @ 6 4
482 veor @x[4], @x[4], @x[3] @ 4 375 [6]
483 veor @x[3], @x[3], @x[7] @ 375 756=36
484 veor @x[6], @x[6], @t[5] @ 64 5 [7]
485 veor @x[3], @x[3], @t[2] @ 36 2
486 vext.8 @t[5], @t[5], @t[5], #12
487 veor @x[3], @x[3], @t[4] @ 362 4 [5]
488___
489 my @y = @x[7,5,0,2,1,3,4,6];
490$code.=<<___;
491 @ multiplication by 0x0b
492 veor @y[1], @y[1], @y[0]
493 veor @y[0], @y[0], @t[0]
494 vext.8 @t[2], @t[2], @t[2], #12
495 veor @y[1], @y[1], @t[1]
496 veor @y[0], @y[0], @t[5]
497 vext.8 @t[4], @t[4], @t[4], #12
498 veor @y[1], @y[1], @t[6]
499 veor @y[0], @y[0], @t[7]
500 veor @t[7], @t[7], @t[6] @ clobber t[7]
501
502 veor @y[3], @y[3], @t[0]
503 veor @y[1], @y[1], @y[0]
504 vext.8 @t[0], @t[0], @t[0], #12
505 veor @y[2], @y[2], @t[1]
506 veor @y[4], @y[4], @t[1]
507 vext.8 @t[1], @t[1], @t[1], #12
508 veor @y[2], @y[2], @t[2]
509 veor @y[3], @y[3], @t[2]
510 veor @y[5], @y[5], @t[2]
511 veor @y[2], @y[2], @t[7]
512 vext.8 @t[2], @t[2], @t[2], #12
513 veor @y[3], @y[3], @t[3]
514 veor @y[6], @y[6], @t[3]
515 veor @y[4], @y[4], @t[3]
516 veor @y[7], @y[7], @t[4]
517 vext.8 @t[3], @t[3], @t[3], #12
518 veor @y[5], @y[5], @t[4]
519 veor @y[7], @y[7], @t[7]
520 veor @t[7], @t[7], @t[5] @ clobber t[7] even more
521 veor @y[3], @y[3], @t[5]
522 veor @y[4], @y[4], @t[4]
523
524 veor @y[5], @y[5], @t[7]
525 vext.8 @t[4], @t[4], @t[4], #12
526 veor @y[6], @y[6], @t[7]
527 veor @y[4], @y[4], @t[7]
528
529 veor @t[7], @t[7], @t[5]
530 vext.8 @t[5], @t[5], @t[5], #12
531
532 @ multiplication by 0x0d
533 veor @y[4], @y[4], @y[7]
534 veor @t[7], @t[7], @t[6] @ restore t[7]
535 veor @y[7], @y[7], @t[4]
536 vext.8 @t[6], @t[6], @t[6], #12
537 veor @y[2], @y[2], @t[0]
538 veor @y[7], @y[7], @t[5]
539 vext.8 @t[7], @t[7], @t[7], #12
540 veor @y[2], @y[2], @t[2]
541
542 veor @y[3], @y[3], @y[1]
543 veor @y[1], @y[1], @t[1]
544 veor @y[0], @y[0], @t[0]
545 veor @y[3], @y[3], @t[0]
546 veor @y[1], @y[1], @t[5]
547 veor @y[0], @y[0], @t[5]
548 vext.8 @t[0], @t[0], @t[0], #12
549 veor @y[1], @y[1], @t[7]
550 veor @y[0], @y[0], @t[6]
551 veor @y[3], @y[3], @y[1]
552 veor @y[4], @y[4], @t[1]
553 vext.8 @t[1], @t[1], @t[1], #12
554
555 veor @y[7], @y[7], @t[7]
556 veor @y[4], @y[4], @t[2]
557 veor @y[5], @y[5], @t[2]
558 veor @y[2], @y[2], @t[6]
559 veor @t[6], @t[6], @t[3] @ clobber t[6]
560 vext.8 @t[2], @t[2], @t[2], #12
561 veor @y[4], @y[4], @y[7]
562 veor @y[3], @y[3], @t[6]
563
564 veor @y[6], @y[6], @t[6]
565 veor @y[5], @y[5], @t[5]
566 vext.8 @t[5], @t[5], @t[5], #12
567 veor @y[6], @y[6], @t[4]
568 vext.8 @t[4], @t[4], @t[4], #12
569 veor @y[5], @y[5], @t[6]
570 veor @y[6], @y[6], @t[7]
571 vext.8 @t[7], @t[7], @t[7], #12
572 veor @t[6], @t[6], @t[3] @ restore t[6]
573 vext.8 @t[3], @t[3], @t[3], #12
574
575 @ multiplication by 0x09
576 veor @y[4], @y[4], @y[1]
577 veor @t[1], @t[1], @y[1] @ t[1]=y[1]
578 veor @t[0], @t[0], @t[5] @ clobber t[0]
579 vext.8 @t[6], @t[6], @t[6], #12
580 veor @t[1], @t[1], @t[5]
581 veor @y[3], @y[3], @t[0]
582 veor @t[0], @t[0], @y[0] @ t[0]=y[0]
583 veor @t[1], @t[1], @t[6]
584 veor @t[6], @t[6], @t[7] @ clobber t[6]
585 veor @y[4], @y[4], @t[1]
586 veor @y[7], @y[7], @t[4]
587 veor @y[6], @y[6], @t[3]
588 veor @y[5], @y[5], @t[2]
589 veor @t[4], @t[4], @y[4] @ t[4]=y[4]
590 veor @t[3], @t[3], @y[3] @ t[3]=y[3]
591 veor @t[5], @t[5], @y[5] @ t[5]=y[5]
592 veor @t[2], @t[2], @y[2] @ t[2]=y[2]
593 veor @t[3], @t[3], @t[7]
594 veor @XMM[5], @t[5], @t[6]
595 veor @XMM[6], @t[6], @y[6] @ t[6]=y[6]
596 veor @XMM[2], @t[2], @t[6]
597 veor @XMM[7], @t[7], @y[7] @ t[7]=y[7]
598
599 vmov @XMM[0], @t[0]
600 vmov @XMM[1], @t[1]
601 @ vmov @XMM[2], @t[2]
602 vmov @XMM[3], @t[3]
603 vmov @XMM[4], @t[4]
604 @ vmov @XMM[5], @t[5]
605 @ vmov @XMM[6], @t[6]
606 @ vmov @XMM[7], @t[7]
607___
608}
609
610sub InvMixColumns {
611my @x=@_[0..7];
612my @t=@_[8..15];
613
614# Thanks to Jussi Kivilinna for providing pointer to
615#
616# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 |
617# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
618# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 |
619# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 |
620
621$code.=<<___;
622 @ multiplication by 0x05-0x00-0x04-0x00
623 vext.8 @t[0], @x[0], @x[0], #8
624 vext.8 @t[6], @x[6], @x[6], #8
625 vext.8 @t[7], @x[7], @x[7], #8
626 veor @t[0], @t[0], @x[0]
627 vext.8 @t[1], @x[1], @x[1], #8
628 veor @t[6], @t[6], @x[6]
629 vext.8 @t[2], @x[2], @x[2], #8
630 veor @t[7], @t[7], @x[7]
631 vext.8 @t[3], @x[3], @x[3], #8
632 veor @t[1], @t[1], @x[1]
633 vext.8 @t[4], @x[4], @x[4], #8
634 veor @t[2], @t[2], @x[2]
635 vext.8 @t[5], @x[5], @x[5], #8
636 veor @t[3], @t[3], @x[3]
637 veor @t[4], @t[4], @x[4]
638 veor @t[5], @t[5], @x[5]
639
640 veor @x[0], @x[0], @t[6]
641 veor @x[1], @x[1], @t[6]
642 veor @x[2], @x[2], @t[0]
643 veor @x[4], @x[4], @t[2]
644 veor @x[3], @x[3], @t[1]
645 veor @x[1], @x[1], @t[7]
646 veor @x[2], @x[2], @t[7]
647 veor @x[4], @x[4], @t[6]
648 veor @x[5], @x[5], @t[3]
649 veor @x[3], @x[3], @t[6]
650 veor @x[6], @x[6], @t[4]
651 veor @x[4], @x[4], @t[7]
652 veor @x[5], @x[5], @t[7]
653 veor @x[7], @x[7], @t[5]
654___
655 &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6
656}
657
658sub swapmove {
659my ($a,$b,$n,$mask,$t)=@_;
660$code.=<<___;
661 vshr.u64 $t, $b, #$n
662 veor $t, $t, $a
663 vand $t, $t, $mask
664 veor $a, $a, $t
665 vshl.u64 $t, $t, #$n
666 veor $b, $b, $t
667___
668}
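# Scalar reference model of the swapmove step above (an illustrative sketch,
# a hypothetical helper not used for code generation): it exchanges the bits
# of $a selected by $mask with the bits of $b sitting $n positions higher,
# one 64-bit lane at a time, exactly as the vshr/veor/vand/veor/vshl/veor
# sequence does.
sub swapmove_ref {
	my ($a,$b,$n,$mask)=@_;	# e.g. ($a,$b)=swapmove_ref($a,$b,1,0x5555555555555555)
	my $t = (($b >> $n) ^ $a) & $mask;
	return ($a ^ $t, $b ^ ($t << $n));
}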
669sub swapmove2x {
670my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
671$code.=<<___;
672 vshr.u64 $t0, $b0, #$n
673 vshr.u64 $t1, $b1, #$n
674 veor $t0, $t0, $a0
675 veor $t1, $t1, $a1
676 vand $t0, $t0, $mask
677 vand $t1, $t1, $mask
678 veor $a0, $a0, $t0
679 vshl.u64 $t0, $t0, #$n
680 veor $a1, $a1, $t1
681 vshl.u64 $t1, $t1, #$n
682 veor $b0, $b0, $t0
683 veor $b1, $b1, $t1
684___
685}
686
687sub bitslice {
688my @x=reverse(@_[0..7]);
689my ($t0,$t1,$t2,$t3)=@_[8..11];
690$code.=<<___;
691 vmov.i8 $t0,#0x55 @ compose .LBS0
692 vmov.i8 $t1,#0x33 @ compose .LBS1
693___
694 &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
695 &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
696$code.=<<___;
697 vmov.i8 $t0,#0x0f @ compose .LBS2
698___
699 &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
700 &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
701
702 &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
703 &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
704}
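# Conceptual model of the transform above (a simplified, hypothetical sketch
# that ignores the NEON lane layout and the register reversal): bitslicing is
# an 8x8 bit-matrix transpose, collecting bit i of every input byte into
# output word i.
sub bitslice_ref {
	my @in=@_[0..7];			# eight input bytes
	my @planes=(0) x 8;			# eight output bit planes
	for my $byte (0..7) {
		for my $bit (0..7) {
			$planes[$bit] |= (($in[$byte] >> $bit) & 1) << $byte;
		}
	}
	return @planes;
}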
705
706$code.=<<___;
707#ifndef __KERNEL__
708# include <openssl/arm_arch.h>
709
710# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
711# define VFP_ABI_POP vldmia sp!,{d8-d15}
712# define VFP_ABI_FRAME 0x40
713#else
714# define VFP_ABI_PUSH
715# define VFP_ABI_POP
716# define VFP_ABI_FRAME 0
717# define BSAES_ASM_EXTENDED_KEY
718# define XTS_CHAIN_TWEAK
719# define __ARM_ARCH__ __LINUX_ARM_ARCH__
720# define __ARM_MAX_ARCH__ 7
721#endif
722
723#ifdef __thumb__
724# define adrl adr
725#endif
726
727#if __ARM_MAX_ARCH__>=7
728.arch armv7-a
729.fpu neon
730
731.text
732.syntax unified @ ARMv7-capable assembler is expected to handle this
733#if defined(__thumb2__) && !defined(__APPLE__)
734.thumb
735#else
736.code 32
737# undef __thumb2__
738#endif
739
740.type _bsaes_decrypt8,%function
741.align 4
742_bsaes_decrypt8:
743	adr $const,.
744	vldmia $key!, {@XMM[9]} @ round 0 key
745#if defined(__thumb2__) || defined(__APPLE__)
746	adr $const,.LM0ISR
747#else
748	add $const,$const,#.LM0ISR-_bsaes_decrypt8
749#endif
750
751 vldmia $const!, {@XMM[8]} @ .LM0ISR
752 veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
753 veor @XMM[11], @XMM[1], @XMM[9]
754 vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
755 vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
756 veor @XMM[12], @XMM[2], @XMM[9]
757 vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
758 vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
759 veor @XMM[13], @XMM[3], @XMM[9]
760 vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
761 vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
762 veor @XMM[14], @XMM[4], @XMM[9]
763 vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
764 vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
765 veor @XMM[15], @XMM[5], @XMM[9]
766 vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
767 vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
768 veor @XMM[10], @XMM[6], @XMM[9]
769 vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
770 vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
771 veor @XMM[11], @XMM[7], @XMM[9]
772 vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
773 vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
774 vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
775 vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
776___
777 &bitslice (@XMM[0..7, 8..11]);
778$code.=<<___;
779 sub $rounds,$rounds,#1
780 b .Ldec_sbox
781.align 4
782.Ldec_loop:
783___
784 &ShiftRows (@XMM[0..7, 8..12]);
785$code.=".Ldec_sbox:\n";
786 &InvSbox (@XMM[0..7, 8..15]);
787$code.=<<___;
788 subs $rounds,$rounds,#1
789 bcc .Ldec_done
790___
791 &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
792$code.=<<___;
793 vldmia $const, {@XMM[12]} @ .LISR
794 ite eq @ Thumb2 thing, sanity check in ARM
795 addeq $const,$const,#0x10
796 bne .Ldec_loop
797 vldmia $const, {@XMM[12]} @ .LISRM0
798 b .Ldec_loop
799.align 4
800.Ldec_done:
801___
802 &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
803$code.=<<___;
804 vldmia $key, {@XMM[8]} @ last round key
805 veor @XMM[6], @XMM[6], @XMM[8]
806 veor @XMM[4], @XMM[4], @XMM[8]
807 veor @XMM[2], @XMM[2], @XMM[8]
808 veor @XMM[7], @XMM[7], @XMM[8]
809 veor @XMM[3], @XMM[3], @XMM[8]
810 veor @XMM[5], @XMM[5], @XMM[8]
811 veor @XMM[0], @XMM[0], @XMM[8]
812 veor @XMM[1], @XMM[1], @XMM[8]
813 bx lr
814.size _bsaes_decrypt8,.-_bsaes_decrypt8
815
816.type _bsaes_const,%object
817.align 6
818_bsaes_const:
819.LM0ISR: @ InvShiftRows constants
820 .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
821.LISR:
822 .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
823.LISRM0:
824 .quad 0x01040b0e0205080f, 0x0306090c00070a0d
825.LM0SR: @ ShiftRows constants
826 .quad 0x0a0e02060f03070b, 0x0004080c05090d01
827.LSR:
828 .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
829.LSRM0:
830 .quad 0x0304090e00050a0f, 0x01060b0c0207080d
831.LM0:
832 .quad 0x02060a0e03070b0f, 0x0004080c0105090d
833.LREVM0SR:
834 .quad 0x090d01050c000408, 0x03070b0f060a0e02
835.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
836.align 6
837.size _bsaes_const,.-_bsaes_const
838
839.type _bsaes_encrypt8,%function
840.align 4
841_bsaes_encrypt8:
842	adr $const,.
843	vldmia $key!, {@XMM[9]} @ round 0 key
844#if defined(__thumb2__) || defined(__APPLE__)
845	adr $const,.LM0SR
846#else
847	sub $const,$const,#_bsaes_encrypt8-.LM0SR
848#endif
849
850 vldmia $const!, {@XMM[8]} @ .LM0SR
851_bsaes_encrypt8_alt:
852 veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
853 veor @XMM[11], @XMM[1], @XMM[9]
854 vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
855 vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
856 veor @XMM[12], @XMM[2], @XMM[9]
857 vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
858 vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
859 veor @XMM[13], @XMM[3], @XMM[9]
860 vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
861 vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
862 veor @XMM[14], @XMM[4], @XMM[9]
863 vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
864 vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
865 veor @XMM[15], @XMM[5], @XMM[9]
866 vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
867 vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
868 veor @XMM[10], @XMM[6], @XMM[9]
869 vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
870 vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
871 veor @XMM[11], @XMM[7], @XMM[9]
872 vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
873 vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
874 vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
875 vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
876_bsaes_encrypt8_bitslice:
877___
878 &bitslice (@XMM[0..7, 8..11]);
879$code.=<<___;
880 sub $rounds,$rounds,#1
881 b .Lenc_sbox
882.align 4
883.Lenc_loop:
884___
885 &ShiftRows (@XMM[0..7, 8..12]);
886$code.=".Lenc_sbox:\n";
887 &Sbox (@XMM[0..7, 8..15]);
888$code.=<<___;
889 subs $rounds,$rounds,#1
890 bcc .Lenc_done
891___
892 &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
893$code.=<<___;
894 vldmia $const, {@XMM[12]} @ .LSR
895	ite	eq	@ Thumb2 thing, sanity check in ARM
896 addeq $const,$const,#0x10
897 bne .Lenc_loop
898 vldmia $const, {@XMM[12]} @ .LSRM0
899 b .Lenc_loop
900.align 4
901.Lenc_done:
902___
903 # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
904 &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
905$code.=<<___;
906 vldmia $key, {@XMM[8]} @ last round key
907 veor @XMM[4], @XMM[4], @XMM[8]
908 veor @XMM[6], @XMM[6], @XMM[8]
909 veor @XMM[3], @XMM[3], @XMM[8]
910 veor @XMM[7], @XMM[7], @XMM[8]
911 veor @XMM[2], @XMM[2], @XMM[8]
912 veor @XMM[5], @XMM[5], @XMM[8]
913 veor @XMM[0], @XMM[0], @XMM[8]
914 veor @XMM[1], @XMM[1], @XMM[8]
915 bx lr
916.size _bsaes_encrypt8,.-_bsaes_encrypt8
917___
918}
919{
920my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
921
922sub bitslice_key {
923my @x=reverse(@_[0..7]);
924my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
925
926 &swapmove (@x[0,1],1,$bs0,$t2,$t3);
927$code.=<<___;
928 @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
929 vmov @x[2], @x[0]
930 vmov @x[3], @x[1]
931___
932 #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
933
934 &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
935$code.=<<___;
936 @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
937 vmov @x[4], @x[0]
938 vmov @x[6], @x[2]
939 vmov @x[5], @x[1]
940 vmov @x[7], @x[3]
941___
942 &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
943 &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
944}
945
946$code.=<<___;
947.type _bsaes_key_convert,%function
948.align 4
949_bsaes_key_convert:
950	adr $const,.
951	vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key
952#if defined(__thumb2__) || defined(__APPLE__)
953	adr $const,.LM0
954#else
955	sub $const,$const,#_bsaes_key_convert-.LM0
956#endif
957	vld1.8 {@XMM[15]}, [$inp]! @ load round 1 key
958
959 vmov.i8 @XMM[8], #0x01 @ bit masks
960 vmov.i8 @XMM[9], #0x02
961 vmov.i8 @XMM[10], #0x04
962 vmov.i8 @XMM[11], #0x08
963 vmov.i8 @XMM[12], #0x10
964 vmov.i8 @XMM[13], #0x20
965 vldmia $const, {@XMM[14]} @ .LM0
966
967#ifdef __ARMEL__
968 vrev32.8 @XMM[7], @XMM[7]
969 vrev32.8 @XMM[15], @XMM[15]
970#endif
971 sub $rounds,$rounds,#1
972 vstmia $out!, {@XMM[7]} @ save round 0 key
973 b .Lkey_loop
974
975.align 4
976.Lkey_loop:
977 vtbl.8 `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
978 vtbl.8 `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
979 vmov.i8 @XMM[6], #0x40
980 vmov.i8 @XMM[15], #0x80
981
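	@ vtst.8 sets a byte lane to all-ones when the tested key bit is set,
	@ so the eight tests below spread bit i of every key byte into
	@ bit-sliced register i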
982 vtst.8 @XMM[0], @XMM[7], @XMM[8]
983 vtst.8 @XMM[1], @XMM[7], @XMM[9]
984 vtst.8 @XMM[2], @XMM[7], @XMM[10]
985 vtst.8 @XMM[3], @XMM[7], @XMM[11]
986 vtst.8 @XMM[4], @XMM[7], @XMM[12]
987 vtst.8 @XMM[5], @XMM[7], @XMM[13]
988 vtst.8 @XMM[6], @XMM[7], @XMM[6]
989 vtst.8 @XMM[7], @XMM[7], @XMM[15]
990 vld1.8 {@XMM[15]}, [$inp]! @ load next round key
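	@ Flipping bit planes 0, 1, 5 and 6 ("pnot") is equivalent to XORing
	@ 0x63 into every byte of the bit-sliced round key; the .L63 constant
	@ composed at the end of this routine gives callers the matching
	@ fix-up for the last round key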
991 vmvn @XMM[0], @XMM[0] @ "pnot"
992 vmvn @XMM[1], @XMM[1]
993 vmvn @XMM[5], @XMM[5]
994 vmvn @XMM[6], @XMM[6]
995#ifdef __ARMEL__
996 vrev32.8 @XMM[15], @XMM[15]
997#endif
998 subs $rounds,$rounds,#1
999 vstmia $out!,{@XMM[0]-@XMM[7]} @ write bit-sliced round key
1000 bne .Lkey_loop
1001
1002 vmov.i8 @XMM[7],#0x63 @ compose .L63
1003 @ don't save last round key
1004 bx lr
1005.size _bsaes_key_convert,.-_bsaes_key_convert
1006___
1007}
1008
1009if (0) { # following four functions are unsupported interface
1010 # used for benchmarking...
1011$code.=<<___;
1012.globl bsaes_enc_key_convert
1013.type bsaes_enc_key_convert,%function
1014.align 4
1015bsaes_enc_key_convert:
1016 stmdb sp!,{r4-r6,lr}
1017 vstmdb sp!,{d8-d15} @ ABI specification says so
1018
1019 ldr r5,[$inp,#240] @ pass rounds
1020 mov r4,$inp @ pass key
1021 mov r12,$out @ pass key schedule
1022 bl _bsaes_key_convert
1023 veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
1024 vstmia r12, {@XMM[7]} @ save last round key
1025
1026 vldmia sp!,{d8-d15}
1027 ldmia sp!,{r4-r6,pc}
1028.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
1029
1030.globl bsaes_encrypt_128
1031.type bsaes_encrypt_128,%function
1032.align 4
1033bsaes_encrypt_128:
1034 stmdb sp!,{r4-r6,lr}
1035 vstmdb sp!,{d8-d15} @ ABI specification says so
1036.Lenc128_loop:
1037 vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
1038 vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
1039 mov r4,$key @ pass the key
1040 vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
1041 mov r5,#10 @ pass rounds
1042 vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
1043
1044 bl _bsaes_encrypt8
1045
1046 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1047 vst1.8 {@XMM[4]}, [$out]!
1048 vst1.8 {@XMM[6]}, [$out]!
1049 vst1.8 {@XMM[3]}, [$out]!
1050 vst1.8 {@XMM[7]}, [$out]!
1051 vst1.8 {@XMM[2]}, [$out]!
1052 subs $len,$len,#0x80
1053 vst1.8 {@XMM[5]}, [$out]!
1054 bhi .Lenc128_loop
1055
1056 vldmia sp!,{d8-d15}
1057 ldmia sp!,{r4-r6,pc}
1058.size bsaes_encrypt_128,.-bsaes_encrypt_128
1059
1060.globl bsaes_dec_key_convert
1061.type bsaes_dec_key_convert,%function
1062.align 4
1063bsaes_dec_key_convert:
1064 stmdb sp!,{r4-r6,lr}
1065 vstmdb sp!,{d8-d15} @ ABI specification says so
1066
1067 ldr r5,[$inp,#240] @ pass rounds
1068 mov r4,$inp @ pass key
1069 mov r12,$out @ pass key schedule
1070 bl _bsaes_key_convert
1071 vldmia $out, {@XMM[6]}
1072 vstmia r12, {@XMM[15]} @ save last round key
1073 veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
1074 vstmia $out, {@XMM[7]}
1075
1076 vldmia sp!,{d8-d15}
1077 ldmia sp!,{r4-r6,pc}
1078.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
1079
1080.globl bsaes_decrypt_128
1081.type bsaes_decrypt_128,%function
1082.align 4
1083bsaes_decrypt_128:
1084 stmdb sp!,{r4-r6,lr}
1085 vstmdb sp!,{d8-d15} @ ABI specification says so
1086.Ldec128_loop:
1087 vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
1088 vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
1089 mov r4,$key @ pass the key
1090 vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
1091 mov r5,#10 @ pass rounds
1092 vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
1093
1094 bl _bsaes_decrypt8
1095
1096 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1097 vst1.8 {@XMM[6]}, [$out]!
1098 vst1.8 {@XMM[4]}, [$out]!
1099 vst1.8 {@XMM[2]}, [$out]!
1100 vst1.8 {@XMM[7]}, [$out]!
1101 vst1.8 {@XMM[3]}, [$out]!
1102 subs $len,$len,#0x80
1103 vst1.8 {@XMM[5]}, [$out]!
1104 bhi .Ldec128_loop
1105
1106 vldmia sp!,{d8-d15}
1107 ldmia sp!,{r4-r6,pc}
1108.size bsaes_decrypt_128,.-bsaes_decrypt_128
1109___
1110}
1111{
1112my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
1113my ($keysched)=("sp");
1114
1115$code.=<<___;
1116.global bsaes_cbc_encrypt
1117.type bsaes_cbc_encrypt,%function
1118.align 5
1119bsaes_cbc_encrypt:
1120	@ In OpenSSL, this function had a fallback to aes_nohw_cbc_encrypt for
1121 @ short inputs. We patch this out, using bsaes for all input sizes.
1122
1123 @ it is up to the caller to make sure we are called with enc == 0
1124
1125 mov ip, sp
1126 stmdb sp!, {r4-r10, lr}
1127 VFP_ABI_PUSH
1128 ldr $ivp, [ip] @ IV is 1st arg on the stack
1129 mov $len, $len, lsr#4 @ len in 16 byte blocks
1130 sub sp, #0x10 @ scratch space to carry over the IV
1131 mov $fp, sp @ save sp
1132
1133 ldr $rounds, [$key, #240] @ get # of rounds
1134#ifndef BSAES_ASM_EXTENDED_KEY
1135 @ allocate the key schedule on the stack
1136 sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
1137	add r12, #`128-32`	@ size of bit-sliced key schedule
1138
1139 @ populate the key schedule
1140 mov r4, $key @ pass key
1141 mov r5, $rounds @ pass # of rounds
1142 mov sp, r12 @ sp is $keysched
1143 bl _bsaes_key_convert
1144 vldmia $keysched, {@XMM[6]}
1145 vstmia r12, {@XMM[15]} @ save last round key
1146 veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
1147 vstmia $keysched, {@XMM[7]}
1148#else
1149 ldr r12, [$key, #244]
1150 eors r12, #1
1151 beq 0f
1152
1153 @ populate the key schedule
1154 str r12, [$key, #244]
1155 mov r4, $key @ pass key
1156 mov r5, $rounds @ pass # of rounds
1157 add r12, $key, #248 @ pass key schedule
1158 bl _bsaes_key_convert
1159 add r4, $key, #248
1160 vldmia r4, {@XMM[6]}
1161 vstmia r12, {@XMM[15]} @ save last round key
1162 veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
1163 vstmia r4, {@XMM[7]}
1164
1165.align 2
11660:
1167#endif
1168
1169 vld1.8 {@XMM[15]}, [$ivp] @ load IV
1170 b .Lcbc_dec_loop
1171
1172.align 4
1173.Lcbc_dec_loop:
1174 subs $len, $len, #0x8
1175 bmi .Lcbc_dec_loop_finish
1176
1177 vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
1178 vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
1179#ifndef BSAES_ASM_EXTENDED_KEY
1180 mov r4, $keysched @ pass the key
1181#else
1182 add r4, $key, #248
1183#endif
1184 vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
1185 mov r5, $rounds
1186 vld1.8 {@XMM[6]-@XMM[7]}, [$inp]
1187 sub $inp, $inp, #0x60
1188 vstmia $fp, {@XMM[15]} @ put aside IV
1189
1190 bl _bsaes_decrypt8
1191
1192 vldmia $fp, {@XMM[14]} @ reload IV
1193 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
1194 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1195 vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
1196 veor @XMM[1], @XMM[1], @XMM[8]
1197 veor @XMM[6], @XMM[6], @XMM[9]
1198 vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
1199 veor @XMM[4], @XMM[4], @XMM[10]
1200 veor @XMM[2], @XMM[2], @XMM[11]
1201 vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
1202 veor @XMM[7], @XMM[7], @XMM[12]
1203 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1204 veor @XMM[3], @XMM[3], @XMM[13]
1205 vst1.8 {@XMM[6]}, [$out]!
1206 veor @XMM[5], @XMM[5], @XMM[14]
1207 vst1.8 {@XMM[4]}, [$out]!
1208 vst1.8 {@XMM[2]}, [$out]!
1209 vst1.8 {@XMM[7]}, [$out]!
1210 vst1.8 {@XMM[3]}, [$out]!
1211 vst1.8 {@XMM[5]}, [$out]!
1212
1213 b .Lcbc_dec_loop
1214
1215.Lcbc_dec_loop_finish:
1216 adds $len, $len, #8
1217 beq .Lcbc_dec_done
1218
1219	@ Set up most parameters for the _bsaes_decrypt8 call.
1220#ifndef BSAES_ASM_EXTENDED_KEY
1221 mov r4, $keysched @ pass the key
1222#else
1223 add r4, $key, #248
1224#endif
1225 mov r5, $rounds
1226 vstmia $fp, {@XMM[15]} @ put aside IV
1227
1228 vld1.8 {@XMM[0]}, [$inp]! @ load input
1229 cmp $len, #2
1230 blo .Lcbc_dec_one
1231 vld1.8 {@XMM[1]}, [$inp]!
1232	beq .Lcbc_dec_two
1233 vld1.8 {@XMM[2]}, [$inp]!
1234 cmp $len, #4
1235 blo .Lcbc_dec_three
1236 vld1.8 {@XMM[3]}, [$inp]!
1237 beq .Lcbc_dec_four
1238 vld1.8 {@XMM[4]}, [$inp]!
1239 cmp $len, #6
1240 blo .Lcbc_dec_five
1241 vld1.8 {@XMM[5]}, [$inp]!
1242 beq .Lcbc_dec_six
1243 vld1.8 {@XMM[6]}, [$inp]!
1244 sub $inp, $inp, #0x70
1245
1246 bl _bsaes_decrypt8
1247
1248 vldmia $fp, {@XMM[14]} @ reload IV
1249 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
1250 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1251 vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
1252 veor @XMM[1], @XMM[1], @XMM[8]
1253 veor @XMM[6], @XMM[6], @XMM[9]
1254 vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
1255 veor @XMM[4], @XMM[4], @XMM[10]
1256 veor @XMM[2], @XMM[2], @XMM[11]
1257 vld1.8 {@XMM[15]}, [$inp]!
1258 veor @XMM[7], @XMM[7], @XMM[12]
1259 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1260 veor @XMM[3], @XMM[3], @XMM[13]
1261 vst1.8 {@XMM[6]}, [$out]!
1262 vst1.8 {@XMM[4]}, [$out]!
1263 vst1.8 {@XMM[2]}, [$out]!
1264 vst1.8 {@XMM[7]}, [$out]!
1265 vst1.8 {@XMM[3]}, [$out]!
1266 b .Lcbc_dec_done
1267.align 4
1268.Lcbc_dec_six:
1269 sub $inp, $inp, #0x60
1270 bl _bsaes_decrypt8
1271 vldmia $fp,{@XMM[14]} @ reload IV
1272 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
1273 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1274 vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
1275 veor @XMM[1], @XMM[1], @XMM[8]
1276 veor @XMM[6], @XMM[6], @XMM[9]
1277 vld1.8 {@XMM[12]}, [$inp]!
1278 veor @XMM[4], @XMM[4], @XMM[10]
1279 veor @XMM[2], @XMM[2], @XMM[11]
1280 vld1.8 {@XMM[15]}, [$inp]!
1281 veor @XMM[7], @XMM[7], @XMM[12]
1282 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1283 vst1.8 {@XMM[6]}, [$out]!
1284 vst1.8 {@XMM[4]}, [$out]!
1285 vst1.8 {@XMM[2]}, [$out]!
1286 vst1.8 {@XMM[7]}, [$out]!
1287 b .Lcbc_dec_done
1288.align 4
1289.Lcbc_dec_five:
1290 sub $inp, $inp, #0x50
1291 bl _bsaes_decrypt8
1292 vldmia $fp, {@XMM[14]} @ reload IV
1293 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
1294 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1295 vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
1296 veor @XMM[1], @XMM[1], @XMM[8]
1297 veor @XMM[6], @XMM[6], @XMM[9]
1298 vld1.8 {@XMM[15]}, [$inp]!
1299 veor @XMM[4], @XMM[4], @XMM[10]
1300 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1301 veor @XMM[2], @XMM[2], @XMM[11]
1302 vst1.8 {@XMM[6]}, [$out]!
1303 vst1.8 {@XMM[4]}, [$out]!
1304 vst1.8 {@XMM[2]}, [$out]!
1305 b .Lcbc_dec_done
1306.align 4
1307.Lcbc_dec_four:
1308 sub $inp, $inp, #0x40
1309 bl _bsaes_decrypt8
1310 vldmia $fp, {@XMM[14]} @ reload IV
1311 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
1312 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1313 vld1.8 {@XMM[10]}, [$inp]!
1314 veor @XMM[1], @XMM[1], @XMM[8]
1315 veor @XMM[6], @XMM[6], @XMM[9]
1316 vld1.8 {@XMM[15]}, [$inp]!
1317 veor @XMM[4], @XMM[4], @XMM[10]
1318 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1319 vst1.8 {@XMM[6]}, [$out]!
1320 vst1.8 {@XMM[4]}, [$out]!
1321 b .Lcbc_dec_done
1322.align 4
1323.Lcbc_dec_three:
1324 sub $inp, $inp, #0x30
1325 bl _bsaes_decrypt8
1326 vldmia $fp, {@XMM[14]} @ reload IV
1327 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
1328 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1329 vld1.8 {@XMM[15]}, [$inp]!
1330 veor @XMM[1], @XMM[1], @XMM[8]
1331 veor @XMM[6], @XMM[6], @XMM[9]
1332 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1333 vst1.8 {@XMM[6]}, [$out]!
1334 b .Lcbc_dec_done
1335.align 4
1336.Lcbc_dec_two:
1337 sub $inp, $inp, #0x20
1338 bl _bsaes_decrypt8
1339 vldmia $fp, {@XMM[14]} @ reload IV
1340 vld1.8 {@XMM[8]}, [$inp]! @ reload input
1341 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1342 vld1.8 {@XMM[15]}, [$inp]! @ reload input
1343 veor @XMM[1], @XMM[1], @XMM[8]
1344 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1345 b .Lcbc_dec_done
1346.align 4
1347.Lcbc_dec_one:
1348 sub $inp, $inp, #0x10
1349	bl _bsaes_decrypt8
1350 vldmia $fp, {@XMM[14]} @ reload IV
1351 vld1.8 {@XMM[15]}, [$inp]! @ reload input
1352 veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
1353 vst1.8 {@XMM[0]}, [$out]! @ write output
1354
1355.Lcbc_dec_done:
1356#ifndef BSAES_ASM_EXTENDED_KEY
1357 vmov.i32 q0, #0
1358 vmov.i32 q1, #0
1359.Lcbc_dec_bzero: @ wipe key schedule [if any]
1360 vstmia $keysched!, {q0-q1}
1361 cmp $keysched, $fp
1362 bne .Lcbc_dec_bzero
1363#endif
1364
1365 mov sp, $fp
1366 add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
1367 vst1.8 {@XMM[15]}, [$ivp] @ return IV
1368 VFP_ABI_POP
1369 ldmia sp!, {r4-r10, pc}
1370.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
1371___
1372}
1373{
1374my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
1375my $const = "r6"; # shared with _bsaes_encrypt8_alt
1376my $keysched = "sp";
1377
1378$code.=<<___;
1379.global bsaes_ctr32_encrypt_blocks
1380.type bsaes_ctr32_encrypt_blocks,%function
1381.align 5
1382bsaes_ctr32_encrypt_blocks:
1383	@ In OpenSSL, short inputs fall back to aes_nohw_* here. We patch this
1384 @ out to retain a constant-time implementation.
1385	mov ip, sp
1386 stmdb sp!, {r4-r10, lr}
1387 VFP_ABI_PUSH
1388 ldr $ctr, [ip] @ ctr is 1st arg on the stack
1389 sub sp, sp, #0x10 @ scratch space to carry over the ctr
1390 mov $fp, sp @ save sp
1391
1392 ldr $rounds, [$key, #240] @ get # of rounds
1393#ifndef BSAES_ASM_EXTENDED_KEY
1394 @ allocate the key schedule on the stack
1395 sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
1396 add r12, #`128-32` @ size of bit-sliced key schedule
1397
1398 @ populate the key schedule
1399 mov r4, $key @ pass key
1400 mov r5, $rounds @ pass # of rounds
1401 mov sp, r12 @ sp is $keysched
1402 bl _bsaes_key_convert
1403 veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
1404 vstmia r12, {@XMM[7]} @ save last round key
1405
1406 vld1.8 {@XMM[0]}, [$ctr] @ load counter
1407#ifdef __APPLE__
1408	mov $ctr, #:lower16:(.LREVM0SR-.LM0)
1409	add $ctr, $const, $ctr
1410#else
1411	add $ctr, $const, #.LREVM0SR-.LM0 @ borrow $ctr
1412#endif
1413	vldmia $keysched, {@XMM[4]} @ load round0 key
1414#else
1415 ldr r12, [$key, #244]
1416 eors r12, #1
1417 beq 0f
1418
1419 @ populate the key schedule
1420 str r12, [$key, #244]
1421 mov r4, $key @ pass key
1422 mov r5, $rounds @ pass # of rounds
1423 add r12, $key, #248 @ pass key schedule
1424 bl _bsaes_key_convert
1425 veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
1426 vstmia r12, {@XMM[7]} @ save last round key
1427
1428.align 2
14290: add r12, $key, #248
1430 vld1.8 {@XMM[0]}, [$ctr] @ load counter
1431 adrl $ctr, .LREVM0SR @ borrow $ctr
1432 vldmia r12, {@XMM[4]} @ load round0 key
1433 sub sp, #0x10 @ place for adjusted round0 key
1434#endif
1435
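	@ The counter block and round-0 key are kept byte-reversed (vrev32.8)
	@ so the 32-bit counter lanes can be bumped with plain vadd.u32; the
	@ .LREVM0SR permutation used for the first round folds the byte order
	@ back in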
1436 vmov.i32 @XMM[8],#1 @ compose 1<<96
1437 veor @XMM[9],@XMM[9],@XMM[9]
1438 vrev32.8 @XMM[0],@XMM[0]
1439 vext.8 @XMM[8],@XMM[9],@XMM[8],#4
1440 vrev32.8 @XMM[4],@XMM[4]
1441 vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
1442 vstmia $keysched, {@XMM[4]} @ save adjusted round0 key
1443 b .Lctr_enc_loop
1444
1445.align 4
1446.Lctr_enc_loop:
1447 vadd.u32 @XMM[10], @XMM[8], @XMM[9] @ compose 3<<96
1448 vadd.u32 @XMM[1], @XMM[0], @XMM[8] @ +1
1449 vadd.u32 @XMM[2], @XMM[0], @XMM[9] @ +2
1450 vadd.u32 @XMM[3], @XMM[0], @XMM[10] @ +3
1451 vadd.u32 @XMM[4], @XMM[1], @XMM[10]
1452 vadd.u32 @XMM[5], @XMM[2], @XMM[10]
1453 vadd.u32 @XMM[6], @XMM[3], @XMM[10]
1454 vadd.u32 @XMM[7], @XMM[4], @XMM[10]
1455 vadd.u32 @XMM[10], @XMM[5], @XMM[10] @ next counter
1456
1457 @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
1458 @ to flip byte order in 32-bit counter
1459
1460 vldmia $keysched, {@XMM[9]} @ load round0 key
1461#ifndef BSAES_ASM_EXTENDED_KEY
1462 add r4, $keysched, #0x10 @ pass next round key
1463#else
1464 add r4, $key, #`248+16`
1465#endif
1466 vldmia $ctr, {@XMM[8]} @ .LREVM0SR
1467 mov r5, $rounds @ pass rounds
1468 vstmia $fp, {@XMM[10]} @ save next counter
1469#ifdef __APPLE__
1470	mov $const, #:lower16:(.LREVM0SR-.LSR)
1471	sub $const, $ctr, $const
1472#else
1473	sub $const, $ctr, #.LREVM0SR-.LSR @ pass constants
1474#endif
1475
1476 bl _bsaes_encrypt8_alt
1477
1478 subs $len, $len, #8
1479 blo .Lctr_enc_loop_done
1480
1481 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input
1482 vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
1483 veor @XMM[0], @XMM[8]
1484 veor @XMM[1], @XMM[9]
1485 vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
1486 veor @XMM[4], @XMM[10]
1487 veor @XMM[6], @XMM[11]
1488 vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
1489 veor @XMM[3], @XMM[12]
1490 vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
1491 veor @XMM[7], @XMM[13]
1492 veor @XMM[2], @XMM[14]
1493 vst1.8 {@XMM[4]}, [$out]!
1494 veor @XMM[5], @XMM[15]
1495 vst1.8 {@XMM[6]}, [$out]!
1496 vmov.i32 @XMM[8], #1 @ compose 1<<96
1497 vst1.8 {@XMM[3]}, [$out]!
1498 veor @XMM[9], @XMM[9], @XMM[9]
1499 vst1.8 {@XMM[7]}, [$out]!
1500 vext.8 @XMM[8], @XMM[9], @XMM[8], #4
1501 vst1.8 {@XMM[2]}, [$out]!
1502 vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
1503 vst1.8 {@XMM[5]}, [$out]!
1504 vldmia $fp, {@XMM[0]} @ load counter
1505
1506 bne .Lctr_enc_loop
1507 b .Lctr_enc_done
1508
1509.align 4
1510.Lctr_enc_loop_done:
1511 add $len, $len, #8
1512 vld1.8 {@XMM[8]}, [$inp]! @ load input
1513 veor @XMM[0], @XMM[8]
1514 vst1.8 {@XMM[0]}, [$out]! @ write output
1515 cmp $len, #2
1516 blo .Lctr_enc_done
1517 vld1.8 {@XMM[9]}, [$inp]!
1518 veor @XMM[1], @XMM[9]
1519 vst1.8 {@XMM[1]}, [$out]!
1520 beq .Lctr_enc_done
1521 vld1.8 {@XMM[10]}, [$inp]!
1522 veor @XMM[4], @XMM[10]
1523 vst1.8 {@XMM[4]}, [$out]!
1524 cmp $len, #4
1525 blo .Lctr_enc_done
1526 vld1.8 {@XMM[11]}, [$inp]!
1527 veor @XMM[6], @XMM[11]
1528 vst1.8 {@XMM[6]}, [$out]!
1529 beq .Lctr_enc_done
1530 vld1.8 {@XMM[12]}, [$inp]!
1531 veor @XMM[3], @XMM[12]
1532 vst1.8 {@XMM[3]}, [$out]!
1533 cmp $len, #6
1534 blo .Lctr_enc_done
1535 vld1.8 {@XMM[13]}, [$inp]!
1536 veor @XMM[7], @XMM[13]
1537 vst1.8 {@XMM[7]}, [$out]!
1538 beq .Lctr_enc_done
1539 vld1.8 {@XMM[14]}, [$inp]
1540 veor @XMM[2], @XMM[14]
1541 vst1.8 {@XMM[2]}, [$out]!
1542
1543.Lctr_enc_done:
1544 vmov.i32 q0, #0
1545 vmov.i32 q1, #0
1546#ifndef BSAES_ASM_EXTENDED_KEY
1547.Lctr_enc_bzero: @ wipe key schedule [if any]
1548 vstmia $keysched!, {q0-q1}
1549 cmp $keysched, $fp
1550 bne .Lctr_enc_bzero
1551#else
1552 vstmia $keysched, {q0-q1}
1553#endif
1554
1555 mov sp, $fp
1556 add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
1557 VFP_ABI_POP
1558 ldmia sp!, {r4-r10, pc} @ return
1559
1560	@ OpenSSL contains aes_nohw_* fallback code here. We patch this
1561 @ out to retain a constant-time implementation.
1562.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
1563___
1564}
1565# In BoringSSL, we patch XTS support out.
1566if (0) {
1567######################################################################
1568# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
1569# const AES_KEY *key1, const AES_KEY *key2,
1570# const unsigned char iv[16]);
1571#
1572my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
1573my $const="r6"; # returned by _bsaes_key_convert
1574my $twmask=@XMM[5];
1575my @T=@XMM[6..7];
1576
1577$code.=<<___;
1578.globl bsaes_xts_encrypt
1579.type bsaes_xts_encrypt,%function
1580.align 4
1581bsaes_xts_encrypt:
1582 mov ip, sp
1583 stmdb sp!, {r4-r10, lr} @ 0x20
1584 VFP_ABI_PUSH
1585 mov r6, sp @ future $fp
1586
1587 mov $inp, r0
1588 mov $out, r1
1589 mov $len, r2
1590 mov $key, r3
1591
1592 sub r0, sp, #0x10 @ 0x10
1593 bic r0, #0xf @ align at 16 bytes
1594 mov sp, r0
1595
1596#ifdef XTS_CHAIN_TWEAK
1597 ldr r0, [ip] @ pointer to input tweak
1598#else
1599 @ generate initial tweak
1600 ldr r0, [ip, #4] @ iv[]
1601 mov r1, sp
1602 ldr r2, [ip, #0] @ key2
1603	bl aes_nohw_encrypt
1604	mov r0,sp @ pointer to initial tweak
1605#endif
1606
1607 ldr $rounds, [$key, #240] @ get # of rounds
1608 mov $fp, r6
1609#ifndef BSAES_ASM_EXTENDED_KEY
1610 @ allocate the key schedule on the stack
1611 sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
1612 @ add r12, #`128-32` @ size of bit-sliced key schedule
1613 sub r12, #`32+16` @ place for tweak[9]
1614
1615 @ populate the key schedule
1616 mov r4, $key @ pass key
1617 mov r5, $rounds @ pass # of rounds
1618 mov sp, r12
1619 add r12, #0x90 @ pass key schedule
1620 bl _bsaes_key_convert
1621 veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
1622 vstmia r12, {@XMM[7]} @ save last round key
1623#else
1624 ldr r12, [$key, #244]
1625 eors r12, #1
1626 beq 0f
1627
1628 str r12, [$key, #244]
1629 mov r4, $key @ pass key
1630 mov r5, $rounds @ pass # of rounds
1631 add r12, $key, #248 @ pass key schedule
1632 bl _bsaes_key_convert
1633 veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
1634 vstmia r12, {@XMM[7]}
1635
1636.align 2
16370: sub sp, #0x90 @ place for tweak[9]
1638#endif
1639
1640 vld1.8 {@XMM[8]}, [r0] @ initial tweak
1641 adr $magic, .Lxts_magic
1642
1643 subs $len, #0x80
1644 blo .Lxts_enc_short
1645 b .Lxts_enc_loop
1646
1647.align 4
1648.Lxts_enc_loop:
1649 vldmia $magic, {$twmask} @ load XTS magic
1650 vshr.s64 @T[0], @XMM[8], #63
1651 mov r0, sp
1652 vand @T[0], @T[0], $twmask
1653___
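# Each iteration of the loop below emits one GF(2^128) tweak doubling for
# XTS: vadd.u64 shifts both 64-bit halves left by one, the vshr.s64/vand
# pair turns the old sign bits into a {1, 0x87} mask, vswp moves that mask
# to the opposite half, and veor folds in the inter-half carry and the 0x87
# reduction, i.e. next_tweak = (tweak << 1) ^ (0x87 if the top bit was set).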
1654for($i=9;$i<16;$i++) {
1655$code.=<<___;
1656 vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
1657 vst1.64 {@XMM[$i-1]}, [r0,:128]!
1658 vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
1659 vshr.s64 @T[1], @XMM[$i], #63
1660 veor @XMM[$i], @XMM[$i], @T[0]
1661 vand @T[1], @T[1], $twmask
1662___
1663 @T=reverse(@T);
1664
1665$code.=<<___ if ($i>=10);
1666 vld1.8 {@XMM[$i-10]}, [$inp]!
1667___
1668$code.=<<___ if ($i>=11);
1669 veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
1670___
1671}
1672$code.=<<___;
1673 vadd.u64 @XMM[8], @XMM[15], @XMM[15]
1674 vst1.64 {@XMM[15]}, [r0,:128]!
1675 vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
1676 veor @XMM[8], @XMM[8], @T[0]
1677 vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1678
1679 vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
1680 veor @XMM[5], @XMM[5], @XMM[13]
1681#ifndef BSAES_ASM_EXTENDED_KEY
1682 add r4, sp, #0x90 @ pass key schedule
1683#else
1684 add r4, $key, #248 @ pass key schedule
1685#endif
1686 veor @XMM[6], @XMM[6], @XMM[14]
1687 mov r5, $rounds @ pass rounds
1688 veor @XMM[7], @XMM[7], @XMM[15]
1689 mov r0, sp
1690
1691 bl _bsaes_encrypt8
1692
1693 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
1694 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
1695 veor @XMM[0], @XMM[0], @XMM[ 8]
1696 vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
1697 veor @XMM[1], @XMM[1], @XMM[ 9]
1698 veor @XMM[8], @XMM[4], @XMM[10]
1699 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1700 veor @XMM[9], @XMM[6], @XMM[11]
1701 vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
1702 veor @XMM[10], @XMM[3], @XMM[12]
1703 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
1704 veor @XMM[11], @XMM[7], @XMM[13]
1705 veor @XMM[12], @XMM[2], @XMM[14]
1706 vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
1707 veor @XMM[13], @XMM[5], @XMM[15]
1708 vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
1709
1710 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1711
1712 subs $len, #0x80
1713 bpl .Lxts_enc_loop
1714
1715.Lxts_enc_short:
1716 adds $len, #0x70
1717 bmi .Lxts_enc_done
1718
1719 vldmia $magic, {$twmask} @ load XTS magic
1720 vshr.s64 @T[0], @XMM[8], #63
1721 mov r0, sp
1722 vand @T[0], @T[0], $twmask
1723___
1724for($i=9;$i<16;$i++) {
1725$code.=<<___;
1726 vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
1727 vst1.64 {@XMM[$i-1]}, [r0,:128]!
1728 vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
1729 vshr.s64 @T[1], @XMM[$i], #63
1730 veor @XMM[$i], @XMM[$i], @T[0]
1731 vand @T[1], @T[1], $twmask
1732___
1733 @T=reverse(@T);
1734
1735$code.=<<___ if ($i>=10);
1736 vld1.8 {@XMM[$i-10]}, [$inp]!
1737 subs $len, #0x10
1738 bmi .Lxts_enc_`$i-9`
1739___
1740$code.=<<___ if ($i>=11);
1741 veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
1742___
1743}
1744$code.=<<___;
1745 sub $len, #0x10
1746 vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
1747
1748 vld1.8 {@XMM[6]}, [$inp]!
1749 veor @XMM[5], @XMM[5], @XMM[13]
1750#ifndef BSAES_ASM_EXTENDED_KEY
1751 add r4, sp, #0x90 @ pass key schedule
1752#else
1753 add r4, $key, #248 @ pass key schedule
1754#endif
1755 veor @XMM[6], @XMM[6], @XMM[14]
1756 mov r5, $rounds @ pass rounds
1757 mov r0, sp
1758
1759 bl _bsaes_encrypt8
1760
1761 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
1762 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
1763 veor @XMM[0], @XMM[0], @XMM[ 8]
1764 vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
1765 veor @XMM[1], @XMM[1], @XMM[ 9]
1766 veor @XMM[8], @XMM[4], @XMM[10]
1767 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1768 veor @XMM[9], @XMM[6], @XMM[11]
1769 vld1.64 {@XMM[14]}, [r0,:128]!
1770 veor @XMM[10], @XMM[3], @XMM[12]
1771 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
1772 veor @XMM[11], @XMM[7], @XMM[13]
1773 veor @XMM[12], @XMM[2], @XMM[14]
1774 vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
1775 vst1.8 {@XMM[12]}, [$out]!
1776
1777 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1778 b .Lxts_enc_done
1779.align 4
1780.Lxts_enc_6:
1781	veor @XMM[4], @XMM[4], @XMM[12]
1782#ifndef BSAES_ASM_EXTENDED_KEY
1783 add r4, sp, #0x90 @ pass key schedule
1784#else
1785 add r4, $key, #248 @ pass key schedule
1786#endif
1787 veor @XMM[5], @XMM[5], @XMM[13]
1788 mov r5, $rounds @ pass rounds
1789 mov r0, sp
1790
1791 bl _bsaes_encrypt8
1792
1793 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
1794 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
1795 veor @XMM[0], @XMM[0], @XMM[ 8]
1796 vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
1797 veor @XMM[1], @XMM[1], @XMM[ 9]
1798 veor @XMM[8], @XMM[4], @XMM[10]
1799 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1800 veor @XMM[9], @XMM[6], @XMM[11]
1801 veor @XMM[10], @XMM[3], @XMM[12]
1802 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
1803 veor @XMM[11], @XMM[7], @XMM[13]
1804 vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
1805
1806 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1807 b .Lxts_enc_done
1808
1809@ put this in range for both ARM and Thumb mode adr instructions
1810.align 5
1811.Lxts_magic:
1812 .quad 1, 0x87
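@ 0x87 is the low byte of the XTS reduction polynomial x^128+x^7+x^2+x+1;
@ the leading 1 is what gets xor-ed into the high half when a bit is carried
@ out of the low half.  The tweak-update code loads this pair and applies it
@ with the vand/vswp/veor sequence around each doubling.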
1813
1814.align 5
1815.Lxts_enc_5:
1816 veor @XMM[3], @XMM[3], @XMM[11]
1817#ifndef BSAES_ASM_EXTENDED_KEY
1818 add r4, sp, #0x90 @ pass key schedule
1819#else
1820 add r4, $key, #248 @ pass key schedule
1821#endif
1822 veor @XMM[4], @XMM[4], @XMM[12]
1823 mov r5, $rounds @ pass rounds
1824 mov r0, sp
1825
1826 bl _bsaes_encrypt8
1827
1828 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
1829 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
1830 veor @XMM[0], @XMM[0], @XMM[ 8]
1831 vld1.64 {@XMM[12]}, [r0,:128]!
1832 veor @XMM[1], @XMM[1], @XMM[ 9]
1833 veor @XMM[8], @XMM[4], @XMM[10]
1834 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1835 veor @XMM[9], @XMM[6], @XMM[11]
1836 veor @XMM[10], @XMM[3], @XMM[12]
1837 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
1838 vst1.8 {@XMM[10]}, [$out]!
1839
1840 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1841 b .Lxts_enc_done
1842.align 4
1843.Lxts_enc_4:
1844 veor @XMM[2], @XMM[2], @XMM[10]
1845#ifndef BSAES_ASM_EXTENDED_KEY
1846 add r4, sp, #0x90 @ pass key schedule
1847#else
1848 add r4, $key, #248 @ pass key schedule
1849#endif
1850 veor @XMM[3], @XMM[3], @XMM[11]
1851 mov r5, $rounds @ pass rounds
1852 mov r0, sp
1853
1854 bl _bsaes_encrypt8
1855
1856 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
1857 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
1858 veor @XMM[0], @XMM[0], @XMM[ 8]
1859 veor @XMM[1], @XMM[1], @XMM[ 9]
1860 veor @XMM[8], @XMM[4], @XMM[10]
1861 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1862 veor @XMM[9], @XMM[6], @XMM[11]
1863 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
1864
1865 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1866 b .Lxts_enc_done
1867.align 4
1868.Lxts_enc_3:
1869 veor @XMM[1], @XMM[1], @XMM[9]
1870#ifndef BSAES_ASM_EXTENDED_KEY
1871 add r4, sp, #0x90 @ pass key schedule
1872#else
1873 add r4, $key, #248 @ pass key schedule
1874#endif
1875 veor @XMM[2], @XMM[2], @XMM[10]
1876 mov r5, $rounds @ pass rounds
1877 mov r0, sp
1878
1879 bl _bsaes_encrypt8
1880
1881 vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
1882 vld1.64 {@XMM[10]}, [r0,:128]!
1883 veor @XMM[0], @XMM[0], @XMM[ 8]
1884 veor @XMM[1], @XMM[1], @XMM[ 9]
1885 veor @XMM[8], @XMM[4], @XMM[10]
1886 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1887 vst1.8 {@XMM[8]}, [$out]!
1888
1889 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1890 b .Lxts_enc_done
1891.align 4
1892.Lxts_enc_2:
1893 veor @XMM[0], @XMM[0], @XMM[8]
1894#ifndef BSAES_ASM_EXTENDED_KEY
1895 add r4, sp, #0x90 @ pass key schedule
1896#else
1897 add r4, $key, #248 @ pass key schedule
1898#endif
1899 veor @XMM[1], @XMM[1], @XMM[9]
1900 mov r5, $rounds @ pass rounds
1901 mov r0, sp
1902
1903 bl _bsaes_encrypt8
1904
1905 vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
1906 veor @XMM[0], @XMM[0], @XMM[ 8]
1907 veor @XMM[1], @XMM[1], @XMM[ 9]
1908 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
1909
1910 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
1911 b .Lxts_enc_done
1912.align 4
1913.Lxts_enc_1:
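@ only one block left: bounce it through a stack buffer and run the scalar
@ aes_nohw_encrypt on it, applying the current tweak before and after the call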
1914 mov r0, sp
1915 veor @XMM[0], @XMM[0], @XMM[8]
1916 mov r1, sp
1917 vst1.8 {@XMM[0]}, [sp,:128]
1918 mov r2, $key
1919 mov r4, $fp @ preserve fp
1920
1921 bl aes_nohw_encrypt
1922
1923 vld1.8 {@XMM[0]}, [sp,:128]
1924 veor @XMM[0], @XMM[0], @XMM[8]
1925 vst1.8 {@XMM[0]}, [$out]!
1926 mov $fp, r4
1927
1928 vmov @XMM[8], @XMM[9] @ next round tweak
1929
1930.Lxts_enc_done:
1931#ifndef XTS_CHAIN_TWEAK
1932 adds $len, #0x10
1933 beq .Lxts_enc_ret
1934 sub r6, $out, #0x10
1935
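@ ciphertext stealing: move the trailing partial plaintext into the leading
@ bytes of the last full ciphertext block, emit the displaced ciphertext bytes
@ as the short final block, then re-encrypt the patched block in place below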
1936.Lxts_enc_steal:
1937 ldrb r0, [$inp], #1
1938 ldrb r1, [$out, #-0x10]
1939 strb r0, [$out, #-0x10]
1940 strb r1, [$out], #1
1941
1942 subs $len, #1
1943 bhi .Lxts_enc_steal
1944
1945 vld1.8 {@XMM[0]}, [r6]
1946 mov r0, sp
1947 veor @XMM[0], @XMM[0], @XMM[8]
1948 mov r1, sp
1949 vst1.8 {@XMM[0]}, [sp,:128]
1950 mov r2, $key
1951 mov r4, $fp @ preserve fp
1952
1953 bl aes_nohw_encrypt
1954
1955 vld1.8 {@XMM[0]}, [sp,:128]
1956 veor @XMM[0], @XMM[0], @XMM[8]
1957 vst1.8 {@XMM[0]}, [r6]
1958 mov $fp, r4
1959#endif
1960
1961.Lxts_enc_ret:
1962 bic r0, $fp, #0xf
1963 vmov.i32 q0, #0
1964 vmov.i32 q1, #0
1965#ifdef XTS_CHAIN_TWEAK
1966 ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
1967#endif
1968.Lxts_enc_bzero: @ wipe key schedule [if any]
1969 vstmia sp!, {q0-q1}
1970 cmp sp, r0
1971 bne .Lxts_enc_bzero
1972
1973 mov sp, $fp
1974#ifdef XTS_CHAIN_TWEAK
1975 vst1.8 {@XMM[8]}, [r1]
1976#endif
1977 VFP_ABI_POP
1978 ldmia sp!, {r4-r10, pc} @ return
1979
1980.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
1981
1982.globl bsaes_xts_decrypt
1983.type bsaes_xts_decrypt,%function
1984.align 4
1985bsaes_xts_decrypt:
1986 mov ip, sp
1987 stmdb sp!, {r4-r10, lr} @ 0x20
1988 VFP_ABI_PUSH
1989 mov r6, sp @ future $fp
1990
1991 mov $inp, r0
1992 mov $out, r1
1993 mov $len, r2
1994 mov $key, r3
1995
1996 sub r0, sp, #0x10 @ 0x10
1997 bic r0, #0xf @ align at 16 bytes
1998 mov sp, r0
1999
2000#ifdef XTS_CHAIN_TWEAK
2001 ldr r0, [ip] @ pointer to input tweak
2002#else
2003 @ generate initial tweak
2004 ldr r0, [ip, #4] @ iv[]
2005 mov r1, sp
2006 ldr r2, [ip, #0] @ key2
2007 bl aes_nohw_encrypt
2008 mov r0, sp @ pointer to initial tweak
2009#endif
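@ without XTS_CHAIN_TWEAK the initial tweak is computed here as the scalar
@ AES encryption of the IV under the second (tweak) key; with it (kernel use)
@ the caller supplies the tweak buffer and gets the chained value back on exit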
2010
2011 ldr $rounds, [$key, #240] @ get # of rounds
2012 mov $fp, r6
2013#ifndef BSAES_ASM_EXTENDED_KEY
2014 @ allocate the key schedule on the stack
2015 sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
2016 @ add r12, #`128-32` @ size of bit-sliced key schedule
2017 sub r12, #`32+16` @ place for tweak[9]
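@ frame layout from sp up: tweak[0..8] in the bottom 0x90 bytes (eight block
@ tweaks plus the carried-over next round tweak), then the bit-sliced key
@ schedule starting at sp+0x90, which is what gets passed to _bsaes_decrypt8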
2018
2019 @ populate the key schedule
2020 mov r4, $key @ pass key
2021 mov r5, $rounds @ pass # of rounds
2022 mov sp, r12
2023 add r12, #0x90 @ pass key schedule
2024 bl _bsaes_key_convert
2025 add r4, sp, #0x90
2026 vldmia r4, {@XMM[6]}
2027 vstmia r12, {@XMM[15]} @ save last round key
2028 veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
2029 vstmia r4, {@XMM[7]}
2030#else
2031 ldr r12, [$key, #244]
2032 eors r12, #1
2033 beq 0f
2034
2035 str r12, [$key, #244]
2036 mov r4, $key @ pass key
2037 mov r5, $rounds @ pass # of rounds
2038 add r12, $key, #248 @ pass key schedule
2039 bl _bsaes_key_convert
2040 add r4, $key, #248
2041 vldmia r4, {@XMM[6]}
2042 vstmia r12, {@XMM[15]} @ save last round key
2043 veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
2044 vstmia r4, {@XMM[7]}
2045
2046.align 2
20470: sub sp, #0x90 @ place for tweak[9]
2048#endif
2049 vld1.8 {@XMM[8]}, [r0] @ initial tweak
2050 adr $magic, .Lxts_magic
2051
2052#ifndef XTS_CHAIN_TWEAK
2053 tst $len, #0xf @ if not multiple of 16
2054 it ne @ Thumb2 thing, sanity check in ARM
2055 subne $len, #0x10 @ subtract another 16 bytes
2056#endif
2057 subs $len, #0x80
2058
2059 blo .Lxts_dec_short
2060 b .Lxts_dec_loop
2061
2062.align 4
2063.Lxts_dec_loop:
2064 vldmia $magic, {$twmask} @ load XTS magic
2065 vshr.s64 @T[0], @XMM[8], #63
2066 mov r0, sp
2067 vand @T[0], @T[0], $twmask
2068___
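# Same tweak-generation loop as in the encryption path.  The only difference
# downstream is the unshuffling after _bsaes_decrypt8, which returns the eight
# blocks in register order 0,1,6,4,2,7,3,5 rather than encrypt8's 0,1,4,6,3,7,2,5.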
2069for($i=9;$i<16;$i++) {
2070$code.=<<___;
2071 vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
2072 vst1.64 {@XMM[$i-1]}, [r0,:128]!
2073 vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
2074 vshr.s64 @T[1], @XMM[$i], #63
2075 veor @XMM[$i], @XMM[$i], @T[0]
2076 vand @T[1], @T[1], $twmask
2077___
2078 @T=reverse(@T);
2079
2080$code.=<<___ if ($i>=10);
2081 vld1.8 {@XMM[$i-10]}, [$inp]!
2082___
2083$code.=<<___ if ($i>=11);
2084 veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
2085___
2086}
2087$code.=<<___;
2088 vadd.u64 @XMM[8], @XMM[15], @XMM[15]
2089 vst1.64 {@XMM[15]}, [r0,:128]!
2090 vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
2091 veor @XMM[8], @XMM[8], @T[0]
2092 vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2093
2094 vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
2095 veor @XMM[5], @XMM[5], @XMM[13]
2096#ifndef BSAES_ASM_EXTENDED_KEY
2097 add r4, sp, #0x90 @ pass key schedule
2098#else
2099 add r4, $key, #248 @ pass key schedule
2100#endif
2101 veor @XMM[6], @XMM[6], @XMM[14]
2102 mov r5, $rounds @ pass rounds
2103 veor @XMM[7], @XMM[7], @XMM[15]
2104 mov r0, sp
2105
2106 bl _bsaes_decrypt8
2107
2108 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
2109 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
2110 veor @XMM[0], @XMM[0], @XMM[ 8]
2111 vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
2112 veor @XMM[1], @XMM[1], @XMM[ 9]
2113 veor @XMM[8], @XMM[6], @XMM[10]
2114 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2115 veor @XMM[9], @XMM[4], @XMM[11]
2116 vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
2117 veor @XMM[10], @XMM[2], @XMM[12]
2118 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
2119 veor @XMM[11], @XMM[7], @XMM[13]
2120 veor @XMM[12], @XMM[3], @XMM[14]
2121 vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
2122 veor @XMM[13], @XMM[5], @XMM[15]
2123 vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
2124
2125 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2126
2127 subs $len, #0x80
2128 bpl .Lxts_dec_loop
2129
2130.Lxts_dec_short:
2131 adds $len, #0x70
2132 bmi .Lxts_dec_done
2133
2134 vldmia $magic, {$twmask} @ load XTS magic
2135 vshr.s64 @T[0], @XMM[8], #63
2136 mov r0, sp
2137 vand @T[0], @T[0], $twmask
2138___
2139for($i=9;$i<16;$i++) {
2140$code.=<<___;
2141 vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
2142 vst1.64 {@XMM[$i-1]}, [r0,:128]!
2143 vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
2144 vshr.s64 @T[1], @XMM[$i], #63
2145 veor @XMM[$i], @XMM[$i], @T[0]
2146 vand @T[1], @T[1], $twmask
2147___
2148 @T=reverse(@T);
2149
2150$code.=<<___ if ($i>=10);
2151 vld1.8 {@XMM[$i-10]}, [$inp]!
2152 subs $len, #0x10
2153 bmi .Lxts_dec_`$i-9`
2154___
2155$code.=<<___ if ($i>=11);
2156 veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
2157___
2158}
2159$code.=<<___;
2160 sub $len, #0x10
2161 vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
2162
2163 vld1.8 {@XMM[6]}, [$inp]!
2164 veor @XMM[5], @XMM[5], @XMM[13]
2165#ifndef BSAES_ASM_EXTENDED_KEY
2166 add r4, sp, #0x90 @ pass key schedule
2167#else
2168 add r4, $key, #248 @ pass key schedule
2169#endif
2170 veor @XMM[6], @XMM[6], @XMM[14]
2171 mov r5, $rounds @ pass rounds
2172 mov r0, sp
2173
2174 bl _bsaes_decrypt8
2175
2176 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
2177 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
2178 veor @XMM[0], @XMM[0], @XMM[ 8]
2179 vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
2180 veor @XMM[1], @XMM[1], @XMM[ 9]
2181 veor @XMM[8], @XMM[6], @XMM[10]
2182 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2183 veor @XMM[9], @XMM[4], @XMM[11]
2184 vld1.64 {@XMM[14]}, [r0,:128]!
2185 veor @XMM[10], @XMM[2], @XMM[12]
2186 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
2187 veor @XMM[11], @XMM[7], @XMM[13]
2188 veor @XMM[12], @XMM[3], @XMM[14]
2189 vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
2190 vst1.8 {@XMM[12]}, [$out]!
2191
2192 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2193 b .Lxts_dec_done
2194.align 4
2195.Lxts_dec_6:
2196 vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
2197
2198 veor @XMM[4], @XMM[4], @XMM[12]
2199#ifndef BSAES_ASM_EXTENDED_KEY
2200 add r4, sp, #0x90 @ pass key schedule
2201#else
2202 add r4, $key, #248 @ pass key schedule
2203#endif
2204 veor @XMM[5], @XMM[5], @XMM[13]
2205 mov r5, $rounds @ pass rounds
2206 mov r0, sp
2207
2208 bl _bsaes_decrypt8
2209
2210 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
2211 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
2212 veor @XMM[0], @XMM[0], @XMM[ 8]
2213 vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
2214 veor @XMM[1], @XMM[1], @XMM[ 9]
2215 veor @XMM[8], @XMM[6], @XMM[10]
2216 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2217 veor @XMM[9], @XMM[4], @XMM[11]
2218 veor @XMM[10], @XMM[2], @XMM[12]
2219 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
2220 veor @XMM[11], @XMM[7], @XMM[13]
2221 vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
2222
2223 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2224 b .Lxts_dec_done
2225.align 4
2226.Lxts_dec_5:
2227 veor @XMM[3], @XMM[3], @XMM[11]
2228#ifndef BSAES_ASM_EXTENDED_KEY
2229 add r4, sp, #0x90 @ pass key schedule
2230#else
2231 add r4, $key, #248 @ pass key schedule
2232#endif
2233 veor @XMM[4], @XMM[4], @XMM[12]
2234 mov r5, $rounds @ pass rounds
2235 mov r0, sp
2236
2237 bl _bsaes_decrypt8
2238
2239 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
2240 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
2241 veor @XMM[0], @XMM[0], @XMM[ 8]
2242 vld1.64 {@XMM[12]}, [r0,:128]!
2243 veor @XMM[1], @XMM[1], @XMM[ 9]
2244 veor @XMM[8], @XMM[6], @XMM[10]
2245 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2246 veor @XMM[9], @XMM[4], @XMM[11]
2247 veor @XMM[10], @XMM[2], @XMM[12]
2248 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
2249 vst1.8 {@XMM[10]}, [$out]!
2250
2251 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2252 b .Lxts_dec_done
2253.align 4
2254.Lxts_dec_4:
2255 veor @XMM[2], @XMM[2], @XMM[10]
2256#ifndef BSAES_ASM_EXTENDED_KEY
2257 add r4, sp, #0x90 @ pass key schedule
2258#else
2259 add r4, $key, #248 @ pass key schedule
2260#endif
2261 veor @XMM[3], @XMM[3], @XMM[11]
2262 mov r5, $rounds @ pass rounds
2263 mov r0, sp
2264
2265 bl _bsaes_decrypt8
2266
2267 vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
2268 vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
2269 veor @XMM[0], @XMM[0], @XMM[ 8]
2270 veor @XMM[1], @XMM[1], @XMM[ 9]
2271 veor @XMM[8], @XMM[6], @XMM[10]
2272 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2273 veor @XMM[9], @XMM[4], @XMM[11]
2274 vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
2275
2276 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2277 b .Lxts_dec_done
2278.align 4
2279.Lxts_dec_3:
2280 veor @XMM[1], @XMM[1], @XMM[9]
2281#ifndef BSAES_ASM_EXTENDED_KEY
2282 add r4, sp, #0x90 @ pass key schedule
2283#else
2284 add r4, $key, #248 @ pass key schedule
2285#endif
2286 veor @XMM[2], @XMM[2], @XMM[10]
2287 mov r5, $rounds @ pass rounds
2288 mov r0, sp
2289
2290 bl _bsaes_decrypt8
2291
2292 vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
2293 vld1.64 {@XMM[10]}, [r0,:128]!
2294 veor @XMM[0], @XMM[0], @XMM[ 8]
2295 veor @XMM[1], @XMM[1], @XMM[ 9]
2296 veor @XMM[8], @XMM[6], @XMM[10]
2297 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2298 vst1.8 {@XMM[8]}, [$out]!
2299
2300 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2301 b .Lxts_dec_done
2302.align 4
2303.Lxts_dec_2:
2304 veor @XMM[0], @XMM[0], @XMM[8]
2305#ifndef BSAES_ASM_EXTENDED_KEY
2306 add r4, sp, #0x90 @ pass key schedule
2307#else
2308 add r4, $key, #248 @ pass key schedule
2309#endif
2310 veor @XMM[1], @XMM[1], @XMM[9]
2311 mov r5, $rounds @ pass rounds
2312 mov r0, sp
2313
2314 bl _bsaes_decrypt8
2315
2316 vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
2317 veor @XMM[0], @XMM[0], @XMM[ 8]
2318 veor @XMM[1], @XMM[1], @XMM[ 9]
2319 vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
2320
2321 vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
2322 b .Lxts_dec_done
2323.align 4
2324.Lxts_dec_1:
2325 mov r0, sp
2326 veor @XMM[0], @XMM[0], @XMM[8]
2327 mov r1, sp
2328 vst1.8 {@XMM[0]}, [sp,:128]
2329 mov r5, $magic @ preserve magic
2330 mov r2, $key
2331 mov r4, $fp @ preserve fp
2332
2333 bl aes_nohw_decrypt
2334
2335 vld1.8 {@XMM[0]}, [sp,:128]
2336 veor @XMM[0], @XMM[0], @XMM[8]
2337 vst1.8 {@XMM[0]}, [$out]!
2338 mov $fp, r4
2339 mov $magic, r5
2340
2341 vmov @XMM[8], @XMM[9] @ next round tweak
2342
2343.Lxts_dec_done:
2344#ifndef XTS_CHAIN_TWEAK
2345 adds $len, #0x10
2346 beq .Lxts_dec_ret
2347
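@ for the stolen-ciphertext tail the tweak order is reversed on decryption:
@ the last full ciphertext block is decrypted with the tweak one step ahead
@ (computed into q9 below), and the rebuilt partial block is then decrypted
@ with the current tweak still held in q8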
2348 @ calculate one round of extra tweak for the stolen ciphertext
2349 vldmia $magic, {$twmask}
2350 vshr.s64 @XMM[6], @XMM[8], #63
2351 vand @XMM[6], @XMM[6], $twmask
2352 vadd.u64 @XMM[9], @XMM[8], @XMM[8]
2353 vswp `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
2354 veor @XMM[9], @XMM[9], @XMM[6]
2355
2356 @ perform the final decryption with the last tweak value
2357 vld1.8 {@XMM[0]}, [$inp]!
2358 mov r0, sp
2359 veor @XMM[0], @XMM[0], @XMM[9]
2360 mov r1, sp
2361 vst1.8 {@XMM[0]}, [sp,:128]
2362 mov r2, $key
2363 mov r4, $fp @ preserve fp
2364
2365 bl aes_nohw_decrypt
2366
2367 vld1.8 {@XMM[0]}, [sp,:128]
2368 veor @XMM[0], @XMM[0], @XMM[9]
2369 vst1.8 {@XMM[0]}, [$out]
2370
2371 mov r6, $out
2372.Lxts_dec_steal:
2373 ldrb r1, [$out]
2374 ldrb r0, [$inp], #1
2375 strb r1, [$out, #0x10]
2376 strb r0, [$out], #1
2377
2378 subs $len, #1
2379 bhi .Lxts_dec_steal
2380
2381 vld1.8 {@XMM[0]}, [r6]
2382 mov r0, sp
2383 veor @XMM[0], @XMM[8]
2384 mov r1, sp
2385 vst1.8 {@XMM[0]}, [sp,:128]
2386 mov r2, $key
2387
2388 bl aes_nohw_decrypt
2389
2390 vld1.8 {@XMM[0]}, [sp,:128]
2391 veor @XMM[0], @XMM[0], @XMM[8]
2392 vst1.8 {@XMM[0]}, [r6]
2393 mov $fp, r4
2394#endif
2395
2396.Lxts_dec_ret:
2397 bic r0, $fp, #0xf
2398 vmov.i32 q0, #0
2399 vmov.i32 q1, #0
2400#ifdef XTS_CHAIN_TWEAK
2401 ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
2402#endif
2403.Lxts_dec_bzero: @ wipe key schedule [if any]
2404 vstmia sp!, {q0-q1}
2405 cmp sp, r0
2406 bne .Lxts_dec_bzero
2407
2408 mov sp, $fp
2409#ifdef XTS_CHAIN_TWEAK
2410 vst1.8 {@XMM[8]}, [r1]
2411#endif
2412 VFP_ABI_POP
2413 ldmia sp!, {r4-r10, pc} @ return
2414
2415.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
2416___
2417}
2418$code.=<<___;
2419#endif
2420___
2421
2422$code =~ s/\`([^\`]*)\`/eval($1)/gem;
2423
2424open SELF,$0;
2425while(<SELF>) {
2426 next if (/^#!/);
2427 last if (!s/^#/@/ and !/^$/);
2428 print;
2429}
2430close SELF;
2431
2432print $code;
2433
2434close STDOUT;