// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if defined(__has_feature)
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(__aarch64__)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
.section .rodata

.type _vpaes_consts,%object
.align 7 // totally strategic alignment
_vpaes_consts:
.Lk_mc_forward: // mc_forward
.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
.quad 0x080B0A0904070605, 0x000302010C0F0E0D
.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
.quad 0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward: // mc_backward
.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
.quad 0x020100030E0D0C0F, 0x0A09080B06050407
.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
.quad 0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr: // sr
.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad 0x030E09040F0A0500, 0x0B06010C07020D08
.quad 0x0F060D040B020900, 0x070E050C030A0108
.quad 0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv: // inv, inva
.quad 0x0E05060F0D080180, 0x040703090A0B0C02
.quad 0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt: // input transform (lo, hi)
.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo: // sbou, sbot
.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1: // sb1u, sb1t
.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2: // sb2u, sb2t
.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt: // decryption input transform
.quad 0x0F505B040B545F00, 0x154A411E114E451A
.quad 0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo: // decryption sbox final output
.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9: // decryption sbox output *9*u, *9*t
.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd: // decryption sbox output *D*u, *D*t
.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb: // decryption sbox output *B*u, *B*t
.quad 0xD022649296B44200, 0x602646F6B0F2D404
.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe: // decryption sbox output *E*u, *E*t
.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd: // decryption key schedule: invskew x*D
.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb: // decryption key schedule: invskew x*B
.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse: // decryption key schedule: invskew x*E + 0x63
.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9: // decryption key schedule: invskew x*9
.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon: // rcon
.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt: // output transform
.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew: // deskew tables: inverts the sbox's "skew"
.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align 2
.size _vpaes_consts,.-_vpaes_consts
.align 6

.text
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type _vpaes_encrypt_preheat,%function
.align 4
_vpaes_encrypt_preheat:
    adrp x10, .Lk_inv
    add x10, x10, :lo12:.Lk_inv
    movi v17.16b, #0x0f
    ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv
    ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo
    ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10] // .Lk_sb1, .Lk_sb2
    ret
.size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
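
// The preheat pattern used throughout this file: each public entry point
// first calls a *_preheat routine that loads every constant table its core
// needs into fixed vector registers (v17 holds the 0x0F nibble mask, the
// tables live in v18 and up), exactly once. The per-block cores then run
// out of registers, touching memory only for round keys and data.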

##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
## %xmm0 = input
## %xmm9-%xmm15 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.type _vpaes_encrypt_core,%function
.align 4
_vpaes_encrypt_core:
    mov x9, x2
    ldr w8, [x2,#240] // pull rounds
    adrp x11, .Lk_mc_forward+16
    add x11, x11, :lo12:.Lk_mc_forward+16
    // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    b .Lenc_entry

.align 4
.Lenc_loop:
    // middle of middle round
    add x10, x11, #0x40
    tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    sub w8, w8, #1 // nr--

.Lenc_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    tbl v5.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
    cbnz w8, .Lenc_loop

    // middle of last round
    add x10, x11, #0x80
    // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
    ret
.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
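
## All of the S-box work above follows one pattern: split each byte into
## its two 4-bit nibbles, push each nibble through a 16-entry TBL lookup,
## and combine the halves with EOR. A rough C equivalent using NEON
## intrinsics (an illustrative sketch; the helper name is ours, not the
## generator's):
//
//   #include <arm_neon.h>
//
//   static inline uint8x16_t nibble_lookup(uint8x16_t x, uint8x16_t lo_tbl,
//                                          uint8x16_t hi_tbl) {
//     uint8x16_t lo = vandq_u8(x, vdupq_n_u8(0x0f)); // and v1, v0, v17
//     uint8x16_t hi = vshrq_n_u8(x, 4);              // ushr v0, v0, #4
//     return veorq_u8(vqtbl1q_u8(lo_tbl, lo),        // tbl {lo_tbl}, lo
//                     vqtbl1q_u8(hi_tbl, hi));       // tbl {hi_tbl}, hi; eor
//   }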

.globl vpaes_encrypt
.hidden vpaes_encrypt
.type vpaes_encrypt,%function
.align 4
vpaes_encrypt:
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0

    ld1 {v7.16b}, [x0]
    bl _vpaes_encrypt_preheat
    bl _vpaes_encrypt_core
    st1 {v0.16b}, [x1]

    ldp x29,x30,[sp],#16
    ret
.size vpaes_encrypt,.-vpaes_encrypt
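
## Reference C prototype for the entry point above (believed to match
## BoringSSL's internal declaration; verify against your header):
##   void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
## It encrypts exactly one 16-byte block; the key must come from
## vpaes_set_encrypt_key.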

.type _vpaes_encrypt_2x,%function
.align 4
_vpaes_encrypt_2x:
    mov x9, x2
    ldr w8, [x2,#240] // pull rounds
    adrp x11, .Lk_mc_forward+16
    add x11, x11, :lo12:.Lk_mc_forward+16
    // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
    ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
    and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    and v9.16b, v15.16b, v17.16b
    ushr v8.16b, v15.16b, #4
    tbl v1.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
    tbl v9.16b, {v20.16b}, v9.16b
    // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
    tbl v2.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
    tbl v10.16b, {v21.16b}, v8.16b
    eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
    eor v8.16b, v9.16b, v16.16b
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    eor v8.16b, v8.16b, v10.16b
    b .Lenc_2x_entry

.align 4
.Lenc_2x_loop:
    // middle of middle round
    add x10, x11, #0x40
    tbl v4.16b, {v25.16b}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
    tbl v12.16b, {v25.16b}, v10.16b
    ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
    tbl v0.16b, {v24.16b}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
    tbl v8.16b, {v24.16b}, v11.16b
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    tbl v5.16b, {v27.16b}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
    tbl v13.16b, {v27.16b}, v10.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    eor v8.16b, v8.16b, v12.16b
    tbl v2.16b, {v26.16b}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
    tbl v10.16b, {v26.16b}, v11.16b
    ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
    tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
    tbl v11.16b, {v8.16b}, v1.16b
    eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
    eor v10.16b, v10.16b, v13.16b
    tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
    tbl v8.16b, {v8.16b}, v4.16b
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
    eor v11.16b, v11.16b, v10.16b
    tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
    tbl v12.16b, {v11.16b},v1.16b
    eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
    eor v8.16b, v8.16b, v11.16b
    and x11, x11, #~(1<<6) // and $0x30, %r11 # ... mod 4
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
    eor v8.16b, v8.16b, v12.16b
    sub w8, w8, #1 // nr--

.Lenc_2x_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    and v9.16b, v8.16b, v17.16b
    ushr v8.16b, v8.16b, #4
    tbl v5.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
    tbl v13.16b, {v19.16b},v9.16b
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    eor v9.16b, v9.16b, v8.16b
    tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v11.16b, {v18.16b},v8.16b
    tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    tbl v12.16b, {v18.16b},v9.16b
    eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v11.16b, v11.16b, v13.16b
    eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    eor v12.16b, v12.16b, v13.16b
    tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v10.16b, {v18.16b},v11.16b
    tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    tbl v11.16b, {v18.16b},v12.16b
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v10.16b, v10.16b, v9.16b
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    eor v11.16b, v11.16b, v8.16b
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
    cbnz w8, .Lenc_2x_loop

    // middle of last round
    add x10, x11, #0x80
    // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
    // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
    tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    tbl v12.16b, {v22.16b}, v10.16b
    ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
    tbl v0.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
    tbl v8.16b, {v23.16b}, v11.16b
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
    eor v8.16b, v8.16b, v12.16b
    tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
    tbl v1.16b, {v8.16b},v1.16b
    ret
.size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x
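
## _vpaes_encrypt_2x is _vpaes_encrypt_core with every vector operation
## duplicated onto a second register set (v8-v13 shadow v0-v5; v14/v15
## carry the second input), so it encrypts two independent blocks per
## call. The interleaving exists to hide tbl/eor result latency, not to
## change the algorithm; the two data streams never mix.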

.type _vpaes_decrypt_preheat,%function
.align 4
_vpaes_decrypt_preheat:
    adrp x10, .Lk_inv
    add x10, x10, :lo12:.Lk_inv
    movi v17.16b, #0x0f
    adrp x11, .Lk_dipt
    add x11, x11, :lo12:.Lk_dipt
    ld1 {v18.2d,v19.2d}, [x10],#32 // .Lk_inv
    ld1 {v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64 // .Lk_dipt, .Lk_dsbo
    ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64 // .Lk_dsb9, .Lk_dsbd
    ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x11] // .Lk_dsbb, .Lk_dsbe
    ret
.size _vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

##
## Decryption core
##
## Same API as encryption core.
##
.type _vpaes_decrypt_core,%function
.align 4
_vpaes_decrypt_core:
    mov x9, x2
    ldr w8, [x2,#240] // pull rounds

    // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
    lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11
    eor x11, x11, #0x30 // xor $0x30, %r11
    adrp x10, .Lk_sr
    add x10, x10, :lo12:.Lk_sr
    and x11, x11, #0x30 // and $0x30, %r11
    add x11, x11, x10
    adrp x10, .Lk_mc_forward+48
    add x10, x10, :lo12:.Lk_mc_forward+48

    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
    and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v7.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
    ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
    // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
    tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
    eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    b .Ldec_entry

.align 4
.Ldec_loop:
//
// Inverse mix columns
//
    // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
    // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
    tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
    tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
    eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
    // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt

    tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
    tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt

    tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
    tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet

    tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
    tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    sub w8, w8, #1 // sub $1,%rax # nr--

.Ldec_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v3.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
    cbnz w8, .Ldec_loop

    // middle of last round
    // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
    tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
    ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
    tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
    eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
    tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0
    ret
.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
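
## The decrypt loop folds inverse MixColumns into the S-box output: each
## pass xors in the four output-table pairs (dsb9, dsbd, dsbb, dsbe) while
## re-permuting the accumulator with the MixColumns constant held in v5,
## which "ext v5, v5, v5, #12" rotates by four bytes per round. The final
## .Lk_sr load, indexed by the round count folded into x11 above, applies
## the matching inverse-ShiftRows correction.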

.globl vpaes_decrypt
.hidden vpaes_decrypt
.type vpaes_decrypt,%function
.align 4
vpaes_decrypt:
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0

    ld1 {v7.16b}, [x0]
    bl _vpaes_decrypt_preheat
    bl _vpaes_decrypt_core
    st1 {v0.16b}, [x1]

    ldp x29,x30,[sp],#16
    ret
.size vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type _vpaes_decrypt_2x,%function
.align 4
_vpaes_decrypt_2x:
    mov x9, x2
    ldr w8, [x2,#240] // pull rounds

    // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
    lsl x11, x8, #4 // mov %rax, %r11; shl $4, %r11
    eor x11, x11, #0x30 // xor $0x30, %r11
    adrp x10, .Lk_sr
    add x10, x10, :lo12:.Lk_sr
    and x11, x11, #0x30 // and $0x30, %r11
    add x11, x11, x10
    adrp x10, .Lk_mc_forward+48
    add x10, x10, :lo12:.Lk_mc_forward+48

    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
    and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v14.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    and v9.16b, v15.16b, v17.16b
    ushr v8.16b, v15.16b, #4
    tbl v2.16b, {v20.16b},v1.16b // vpshufb %xmm1, %xmm2, %xmm2
    tbl v10.16b, {v20.16b},v9.16b
    ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
    // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
    tbl v0.16b, {v21.16b},v0.16b // vpshufb %xmm0, %xmm1, %xmm0
    tbl v8.16b, {v21.16b},v8.16b
    eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
    eor v10.16b, v10.16b, v16.16b
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    eor v8.16b, v8.16b, v10.16b
    b .Ldec_2x_entry

.align 4
.Ldec_2x_loop:
//
// Inverse mix columns
//
    // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
    // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
    tbl v4.16b, {v24.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
    tbl v12.16b, {v24.16b}, v10.16b
    tbl v1.16b, {v25.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
    tbl v9.16b, {v25.16b}, v11.16b
    eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
    eor v8.16b, v12.16b, v16.16b
    // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt

    tbl v4.16b, {v26.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
    tbl v12.16b, {v26.16b}, v10.16b
    tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v8.16b, {v8.16b},v5.16b
    tbl v1.16b, {v27.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
    tbl v9.16b, {v27.16b}, v11.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    eor v8.16b, v8.16b, v12.16b
    // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b
    // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt

    tbl v4.16b, {v28.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
    tbl v12.16b, {v28.16b}, v10.16b
    tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v8.16b, {v8.16b},v5.16b
    tbl v1.16b, {v29.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
    tbl v9.16b, {v29.16b}, v11.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    eor v8.16b, v8.16b, v12.16b
    // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b
    // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet

    tbl v4.16b, {v30.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
    tbl v12.16b, {v30.16b}, v10.16b
    tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
    tbl v8.16b, {v8.16b},v5.16b
    tbl v1.16b, {v31.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
    tbl v9.16b, {v31.16b}, v11.16b
    eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
    eor v8.16b, v8.16b, v12.16b
    ext v5.16b, v5.16b, v5.16b, #12 // vpalignr $12, %xmm5, %xmm5, %xmm5
    eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
    eor v8.16b, v8.16b, v9.16b
    sub w8, w8, #1 // sub $1,%rax # nr--

.Ldec_2x_entry:
    // top of round
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    and v9.16b, v8.16b, v17.16b
    ushr v8.16b, v8.16b, #4
    tbl v2.16b, {v19.16b},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    tbl v10.16b, {v19.16b},v9.16b
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    eor v9.16b, v9.16b, v8.16b
    tbl v3.16b, {v18.16b},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    tbl v11.16b, {v18.16b},v8.16b
    tbl v4.16b, {v18.16b},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    tbl v12.16b, {v18.16b},v9.16b
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    eor v11.16b, v11.16b, v10.16b
    eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    eor v12.16b, v12.16b, v10.16b
    tbl v2.16b, {v18.16b},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
    tbl v10.16b, {v18.16b},v11.16b
    tbl v3.16b, {v18.16b},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
    tbl v11.16b, {v18.16b},v12.16b
    eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
    eor v10.16b, v10.16b, v9.16b
    eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
    eor v11.16b, v11.16b, v8.16b
    ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
    cbnz w8, .Ldec_2x_loop

    // middle of last round
    // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
    tbl v4.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
    tbl v12.16b, {v22.16b}, v10.16b
    // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
    tbl v1.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
    tbl v9.16b, {v23.16b}, v11.16b
    ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
    eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
    eor v12.16b, v12.16b, v16.16b
    eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
    eor v8.16b, v9.16b, v12.16b
    tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0
    tbl v1.16b, {v8.16b},v2.16b
    ret
.size _vpaes_decrypt_2x,.-_vpaes_decrypt_2x
########################################################
##                                                    ##
##                 AES key schedule                   ##
##                                                    ##
########################################################
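##
## For reference, these routines compute the FIPS 197 expansion on 32-bit
## words w[i] of the key schedule:
##   w[i] = w[i-Nk] ^ SubWord(RotWord(w[i-1])) ^ rcon  when i % Nk == 0
##   w[i] = w[i-Nk] ^ SubWord(w[i-1])                  when Nk == 8, i % Nk == 4
##   w[i] = w[i-Nk] ^ w[i-1]                           otherwise
## with Nk = 4, 6, 8 for 128-, 192-, 256-bit keys. SubWord reuses the
## vector-permutation S-box, the chained xors are the "smear" steps below,
## and everything stays in the transformed basis until output.
##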
.type _vpaes_key_preheat,%function
.align 4
_vpaes_key_preheat:
    adrp x10, .Lk_inv
    add x10, x10, :lo12:.Lk_inv
    movi v16.16b, #0x5b // .Lk_s63
    adrp x11, .Lk_sb1
    add x11, x11, :lo12:.Lk_sb1
    movi v17.16b, #0x0f // .Lk_s0F
    ld1 {v18.2d,v19.2d,v20.2d,v21.2d}, [x10] // .Lk_inv, .Lk_ipt
    adrp x10, .Lk_dksd
    add x10, x10, :lo12:.Lk_dksd
    ld1 {v22.2d,v23.2d}, [x11] // .Lk_sb1
    adrp x11, .Lk_mc_forward
    add x11, x11, :lo12:.Lk_mc_forward
    ld1 {v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb
    ld1 {v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9
    ld1 {v8.2d}, [x10] // .Lk_rcon
    ld1 {v9.2d}, [x11] // .Lk_mc_forward[0]
    ret
.size _vpaes_key_preheat,.-_vpaes_key_preheat

.type _vpaes_schedule_core,%function
.align 4
_vpaes_schedule_core:
    stp x29, x30, [sp,#-16]!
    add x29,sp,#0

    bl _vpaes_key_preheat // load the tables

    ld1 {v0.16b}, [x0],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)

    // input transform
    mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
    bl _vpaes_schedule_transform
    mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7

    adrp x10, .Lk_sr // lea .Lk_sr(%rip),%r10
    add x10, x10, :lo12:.Lk_sr

    add x8, x8, x10
    cbnz w3, .Lschedule_am_decrypting

    // encrypting, output zeroth round key after transform
    st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx)
    b .Lschedule_go

.Lschedule_am_decrypting:
    // decrypting, output zeroth round key after shiftrows
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
    tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
    eor x8, x8, #0x30 // xor $0x30, %r8

.Lschedule_go:
    cmp w1, #192 // cmp $192, %esi
    b.hi .Lschedule_256
    b.eq .Lschedule_192
    // 128: fall through

##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
.Lschedule_128:
    mov x0, #10 // mov $10, %esi

.Loop_schedule_128:
    sub x0, x0, #1 // dec %esi
    bl _vpaes_schedule_round
    cbz x0, .Lschedule_mangle_last
    bl _vpaes_schedule_mangle // write output
    b .Loop_schedule_128

##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
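## (Accounting: .Loop_schedule_192 runs four times and each trip emits
## three round keys; together with the transformed initial key that gives
## the 13 round keys AES-192 requires.)
##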
.align 4
.Lschedule_192:
    sub x0, x0, #8
    ld1 {v0.16b}, [x0] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
    bl _vpaes_schedule_transform // input transform
    mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
    eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
    ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
    mov x0, #4 // mov $4, %esi

.Loop_schedule_192:
    sub x0, x0, #1 // dec %esi
    bl _vpaes_schedule_round
    ext v0.16b, v6.16b, v0.16b, #8 // vpalignr $8,%xmm6,%xmm0,%xmm0
    bl _vpaes_schedule_mangle // save key n
    bl _vpaes_schedule_192_smear
    bl _vpaes_schedule_mangle // save key n+1
    bl _vpaes_schedule_round
    cbz x0, .Lschedule_mangle_last
    bl _vpaes_schedule_mangle // save key n+2
    bl _vpaes_schedule_192_smear
    b .Loop_schedule_192

##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
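## (In FIPS 197 terms: the "high round" is the SubWord(RotWord(w))^rcon
## step taken every eight words, and the "low round" is the SubWord-only
## step AES-256 takes four words later; the vpshufd $0xFF below feeds the
## low round the high word, with no rotation and no rcon.)
##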
.align 4
.Lschedule_256:
    ld1 {v0.16b}, [x0] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
    bl _vpaes_schedule_transform // input transform
    mov x0, #7 // mov $7, %esi

.Loop_schedule_256:
    sub x0, x0, #1 // dec %esi
    bl _vpaes_schedule_mangle // output low result
    mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6

    // high round
    bl _vpaes_schedule_round
    cbz x0, .Lschedule_mangle_last
    bl _vpaes_schedule_mangle

    // low round. swap xmm7 and xmm6
    dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
    movi v4.16b, #0
    mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
    mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
    bl _vpaes_schedule_low_round
    mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7

    b .Loop_schedule_256

##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
## when encrypting, outputs out(%xmm0) ^ 63
## when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align 4
.Lschedule_mangle_last:
    // schedule last round key from xmm0
    adrp x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew
    add x11, x11, :lo12:.Lk_deskew

    cbnz w3, .Lschedule_mangle_last_dec

    // encrypting
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
    adrp x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform
    add x11, x11, :lo12:.Lk_opt
    add x2, x2, #32 // add $32, %rdx
    tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute

.Lschedule_mangle_last_dec:
    ld1 {v20.2d,v21.2d}, [x11] // reload constants
    sub x2, x2, #16 // add $-16, %rdx
    eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0
    bl _vpaes_schedule_transform // output transform
    st1 {v0.2d}, [x2] // vmovdqu %xmm0, (%rdx) # save last key

    // cleanup
    eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
    eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
    eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
    eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
    eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
    eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
    eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
    eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
    ldp x29, x30, [sp],#16
    ret
.size _vpaes_schedule_core,.-_vpaes_schedule_core

##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
## %xmm7: high side, b a x y
## %xmm6: low side, d c 0 0
## %xmm13: 0
##
## Outputs:
## %xmm6: b+c+d b+c 0 0
## %xmm0: b+c+d b+c b a
##
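## (Lane-level trace, reading s[3] down to s[0]: v7 = {b,a,x,y} and
## v6 = {d,c,0,0} on entry; the shuffles and xors below leave
## v0 = {b+c+d, b+c, b, a} and v6 = {b+c+d, b+c, 0, 0}, where "+" is xor.)
##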
.type _vpaes_schedule_192_smear,%function
.align 4
_vpaes_schedule_192_smear:
    movi v1.16b, #0
    dup v0.4s, v7.s[3]
    ins v1.s[3], v6.s[2] // vpshufd $0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
    ins v0.s[0], v7.s[2] // vpshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
    eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
    eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
    eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
    mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
    ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
    ret
.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
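## (Scalar equivalent of one high round, with w the top word of the
## previous round key: w' = SubWord(RotWord(w)) ^ rcon, after which w' is
## xor-smeared across the other three words. _vpaes_schedule_low_round is
## the same minus the rotation and rcon. The 0x5B constant in v16,
## .Lk_s63, encodes AES's 0x63 key-schedule constant in this
## implementation's transformed basis.)
##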
.type _vpaes_schedule_round,%function
.align 4
_vpaes_schedule_round:
    // extract rcon from xmm8
    movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
    ext v1.16b, v8.16b, v4.16b, #15 // vpalignr $15, %xmm8, %xmm4, %xmm1
    ext v8.16b, v8.16b, v8.16b, #15 // vpalignr $15, %xmm8, %xmm8, %xmm8
    eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7

    // rotate
    dup v0.4s, v0.s[3] // vpshufd $0xFF, %xmm0, %xmm0
    ext v0.16b, v0.16b, v0.16b, #1 // vpalignr $1, %xmm0, %xmm0, %xmm0

    // fall through...

    // low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
    // smear xmm7
    ext v1.16b, v4.16b, v7.16b, #12 // vpslldq $4, %xmm7, %xmm1
    eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
    ext v4.16b, v4.16b, v7.16b, #8 // vpslldq $8, %xmm7, %xmm4

    // subbytes
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0 # 1 = i
    eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
    tbl v2.16b, {v19.16b}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
    eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
    tbl v3.16b, {v18.16b}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
    tbl v4.16b, {v18.16b}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
    eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7
    tbl v3.16b, {v18.16b}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
    eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
    tbl v2.16b, {v18.16b}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
    eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
    eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
    tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
    tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
    eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output

    // add in smeared stuff
    eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
    eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
    ret
.size _vpaes_schedule_round,.-_vpaes_schedule_round

##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type _vpaes_schedule_transform,%function
.align 4
_vpaes_schedule_transform:
    and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
    ushr v0.16b, v0.16b, #4 // vpsrlb $4, %xmm0, %xmm0
    // vmovdqa (%r11), %xmm2 # lo
    tbl v2.16b, {v20.16b}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
    // vmovdqa 16(%r11), %xmm1 # hi
    tbl v0.16b, {v21.16b}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
    eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
    ret
.size _vpaes_schedule_transform,.-_vpaes_schedule_transform

##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
## xor with 0x63
## multiply by circulant 0,1,1,1
## apply shiftrows transform
##
## On decrypt,
## xor with 0x63
## multiply by "inverse mixcolumns" circulant E,B,D,9
## deskew
## apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.type _vpaes_schedule_mangle,%function
.align 4
_vpaes_schedule_mangle:
    mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
    // vmovdqa .Lk_mc_forward(%rip),%xmm5
    cbnz w3, .Lschedule_mangle_dec

    // encrypting
    eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4
    add x2, x2, #16 // add $16, %rdx
    tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
    tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
    tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
    eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
    eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3

    b .Lschedule_mangle_both
.align 4
.Lschedule_mangle_dec:
    // inverse mix columns
    // lea .Lk_dksd(%rip),%r11
    ushr v1.16b, v4.16b, #4 // vpsrlb $4, %xmm4, %xmm1 # 1 = hi
    and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo

    // vmovdqa 0x00(%r11), %xmm2
    tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    // vmovdqa 0x10(%r11), %xmm3
    tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
    tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3

    // vmovdqa 0x20(%r11), %xmm2
    tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
    // vmovdqa 0x30(%r11), %xmm3
    tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
    tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3

    // vmovdqa 0x40(%r11), %xmm2
    tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
    // vmovdqa 0x50(%r11), %xmm3
    tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3

    // vmovdqa 0x60(%r11), %xmm2
    tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
    tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
    // vmovdqa 0x70(%r11), %xmm4
    tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4
    ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
    eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
    eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3

    sub x2, x2, #16 // add $-16, %rdx

.Lschedule_mangle_both:
    tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
    add x8, x8, #64-16 // add $-16, %r8
    and x8, x8, #~(1<<6) // and $0x30, %r8
    st1 {v3.2d}, [x2] // vmovdqu %xmm3, (%rdx)
    ret
.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle

.globl vpaes_set_encrypt_key
.hidden vpaes_set_encrypt_key
.type vpaes_set_encrypt_key,%function
.align 4
vpaes_set_encrypt_key:
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so

    lsr w9, w1, #5 // shr $5,%eax
    add w9, w9, #5 // $5,%eax
    str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;

    mov w3, #0 // mov $0,%ecx
    mov x8, #0x30 // mov $0x30,%r8d
    bl _vpaes_schedule_core
    eor x0, x0, x0

    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    ret
.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
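
## rounds = nbits/32 + 5, i.e. 10/12/14 for 128/192/256-bit keys, matching
## AES_KEY->rounds. Expected C prototype (believed to match BoringSSL's
## internal declaration; verify against the header you build with):
##   int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
## The function returns 0 on success (the "eor x0, x0, x0" above).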

.globl vpaes_set_decrypt_key
.hidden vpaes_set_decrypt_key
.type vpaes_set_decrypt_key,%function
.align 4
vpaes_set_decrypt_key:
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so

    lsr w9, w1, #5 // shr $5,%eax
    add w9, w9, #5 // $5,%eax
    str w9, [x2,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
    lsl w9, w9, #4 // shl $4,%eax
    add x2, x2, #16 // lea 16(%rdx,%rax),%rdx
    add x2, x2, x9

    mov w3, #1 // mov $1,%ecx
    lsr w8, w1, #1 // shr $1,%r8d
    and x8, x8, #32 // and $32,%r8d
    eor x8, x8, #32 // xor $32,%r8d # nbits==192?0:32
    bl _vpaes_schedule_core

    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    ret
.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
.globl vpaes_cbc_encrypt
.hidden vpaes_cbc_encrypt
.type vpaes_cbc_encrypt,%function
.align 4
vpaes_cbc_encrypt:
    cbz x2, .Lcbc_abort
    cmp w5, #0 // check direction
    b.eq vpaes_cbc_decrypt

    stp x29,x30,[sp,#-16]!
    add x29,sp,#0

    mov x17, x2 // reassign
    mov x2, x3 // reassign

    ld1 {v0.16b}, [x4] // load ivec
    bl _vpaes_encrypt_preheat
    b .Lcbc_enc_loop

.align 4
.Lcbc_enc_loop:
    ld1 {v7.16b}, [x0],#16 // load input
    eor v7.16b, v7.16b, v0.16b // xor with ivec
    bl _vpaes_encrypt_core
    st1 {v0.16b}, [x1],#16 // save output
    subs x17, x17, #16
    b.hi .Lcbc_enc_loop

    st1 {v0.16b}, [x4] // write ivec

    ldp x29,x30,[sp],#16
.Lcbc_abort:
    ret
.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
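
## CBC encryption chaining as implemented above, in C (a sketch of the
## mode only; encrypt_block is a hypothetical stand-in for
## _vpaes_encrypt_core, and len is assumed to be a multiple of 16):
//
//   #include <stdint.h>
//   #include <string.h>
//
//   // hypothetical single-block primitive: out = E_key(in)
//   void encrypt_block(uint8_t out[16], const uint8_t in[16], const void *key);
//
//   void cbc_encrypt_sketch(const uint8_t *in, uint8_t *out, size_t len,
//                           const void *key, uint8_t ivec[16]) {
//     uint8_t iv[16];
//     memcpy(iv, ivec, 16);
//     for (size_t i = 0; i < len; i += 16) {
//       for (int j = 0; j < 16; j++) iv[j] ^= in[i + j]; // xor with ivec
//       encrypt_block(iv, iv, key);   // ciphertext block...
//       memcpy(out + i, iv, 16);      // ...becomes the next iv
//     }
//     memcpy(ivec, iv, 16);           // write back the final iv
//   }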

.type vpaes_cbc_decrypt,%function
.align 4
vpaes_cbc_decrypt:
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    stp d10,d11,[sp,#-16]!
    stp d12,d13,[sp,#-16]!
    stp d14,d15,[sp,#-16]!

    mov x17, x2 // reassign
    mov x2, x3 // reassign
    ld1 {v6.16b}, [x4] // load ivec
    bl _vpaes_decrypt_preheat
    tst x17, #16
    b.eq .Lcbc_dec_loop2x

    ld1 {v7.16b}, [x0], #16 // load input
    bl _vpaes_decrypt_core
    eor v0.16b, v0.16b, v6.16b // xor with ivec
    orr v6.16b, v7.16b, v7.16b // next ivec value
    st1 {v0.16b}, [x1], #16
    subs x17, x17, #16
    b.ls .Lcbc_dec_done

.align 4
.Lcbc_dec_loop2x:
    ld1 {v14.16b,v15.16b}, [x0], #32
    bl _vpaes_decrypt_2x
    eor v0.16b, v0.16b, v6.16b // xor with ivec
    eor v1.16b, v1.16b, v14.16b
    orr v6.16b, v15.16b, v15.16b
    st1 {v0.16b,v1.16b}, [x1], #32
    subs x17, x17, #32
    b.hi .Lcbc_dec_loop2x

.Lcbc_dec_done:
    st1 {v6.16b}, [x4]

    ldp d14,d15,[sp],#16
    ldp d12,d13,[sp],#16
    ldp d10,d11,[sp],#16
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    ret
.size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
.globl vpaes_ctr32_encrypt_blocks
.hidden vpaes_ctr32_encrypt_blocks
.type vpaes_ctr32_encrypt_blocks,%function
.align 4
vpaes_ctr32_encrypt_blocks:
    stp x29,x30,[sp,#-16]!
    add x29,sp,#0
    stp d8,d9,[sp,#-16]! // ABI spec says so
    stp d10,d11,[sp,#-16]!
    stp d12,d13,[sp,#-16]!
    stp d14,d15,[sp,#-16]!

    cbz x2, .Lctr32_done

    // Note, unlike the other functions, x2 here is measured in blocks,
    // not bytes.
    mov x17, x2
    mov x2, x3

    // Load the IV and counter portion.
    ldr w6, [x4, #12]
    ld1 {v7.16b}, [x4]

    bl _vpaes_encrypt_preheat
    tst x17, #1
    rev w6, w6 // The counter is big-endian.
    b.eq .Lctr32_prep_loop

    // Handle one block so the remaining block count is even for
    // _vpaes_encrypt_2x.
    ld1 {v6.16b}, [x0], #16 // Load input ahead of time
    bl _vpaes_encrypt_core
    eor v0.16b, v0.16b, v6.16b // XOR input and result
    st1 {v0.16b}, [x1], #16
    subs x17, x17, #1
    // Update the counter.
    add w6, w6, #1
    rev w7, w6
    mov v7.s[3], w7
    b.ls .Lctr32_done

.Lctr32_prep_loop:
    // _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
    // uses v14 and v15.
    mov v15.16b, v7.16b
    mov v14.16b, v7.16b
    add w6, w6, #1
    rev w7, w6
    mov v15.s[3], w7

.Lctr32_loop:
    ld1 {v6.16b,v7.16b}, [x0], #32 // Load input ahead of time
    bl _vpaes_encrypt_2x
    eor v0.16b, v0.16b, v6.16b // XOR input and result
    eor v1.16b, v1.16b, v7.16b // XOR input and result (#2)
    st1 {v0.16b,v1.16b}, [x1], #32
    subs x17, x17, #2
    // Update the counter.
    add w7, w6, #1
    add w6, w6, #2
    rev w7, w7
    mov v14.s[3], w7
    rev w7, w6
    mov v15.s[3], w7
    b.hi .Lctr32_loop

.Lctr32_done:
    ldp d14,d15,[sp],#16
    ldp d12,d13,[sp],#16
    ldp d10,d11,[sp],#16
    ldp d8,d9,[sp],#16
    ldp x29,x30,[sp],#16
    ret
.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
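
## CTR32 in C, for reference (a sketch; encrypt_block is the same
## hypothetical stand-in for _vpaes_encrypt_core declared above, and only
## the low 32 bits of the counter increment, big-endian, matching the
## rev/mov s[3] sequences in the loop):
//
//   void ctr32_encrypt_sketch(const uint8_t *in, uint8_t *out, size_t blocks,
//                             const void *key, const uint8_t ivec[16]) {
//     uint8_t ctr_block[16], ks[16];
//     memcpy(ctr_block, ivec, 16);
//     uint32_t c = ((uint32_t)ctr_block[12] << 24) |
//                  ((uint32_t)ctr_block[13] << 16) |
//                  ((uint32_t)ctr_block[14] << 8) | ctr_block[15]; // rev w6, w6
//     for (size_t i = 0; i < blocks; i++) {
//       encrypt_block(ks, ctr_block, key);
//       for (int j = 0; j < 16; j++) out[16*i + j] = in[16*i + j] ^ ks[j];
//       c++;                                  // add w6, w6, #1
//       ctr_block[12] = (uint8_t)(c >> 24);   // rev w7, w6; mov v7.s[3], w7
//       ctr_block[13] = (uint8_t)(c >> 16);
//       ctr_block[14] = (uint8_t)(c >> 8);
//       ctr_block[15] = (uint8_t)c;
//     }
//   }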
#endif
#endif // !OPENSSL_NO_ASM