// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if !defined(__has_feature)
#define __has_feature(x) 0
#endif
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
#include <openssl/arm_arch.h>

.text

.code	32
#undef	__thumb2__
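@ _gcm_init_v8: r0 = Htable (output), r1 = H (input hash key).
@ Precomputes the values the other routines consume: a "twisted" H
@ (H<<1 mod P, which simplifies the modular reduction), the xor of its
@ halves for Karatsuba, and H^2, stored as Htable[0..2]. The 0xe1/0xc2
@ constants encode the bit-reflected GHASH reduction polynomial
@ x^128 + x^7 + x^2 + x + 1.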
.globl	_gcm_init_v8
.private_extern	_gcm_init_v8
#ifdef __thumb2__
.thumb_func	_gcm_init_v8
#endif
.align	4
_gcm_init_v8:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]

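@ H^2 is computed by squaring the twisted H. Each 128x128-bit carry-less
@ product below is assembled from three 64x64 PMULLs (low halves, high
@ halves, and a Karatsuba middle term); the PMULL instructions are
@ emitted as raw .byte encodings so the file assembles even with
@ toolchains that do not know these mnemonics.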
	@ calculate H^2
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
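@ Two-phase reduction: the 256-bit product (Xh:Xm:Xl in q2:q1:q0) is
@ folded back to 128 bits by multiplying the low half by the 0xc2
@ reduction constant twice, per the bit-reflected GHASH algorithm.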
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]		@ store Htable[1..2]

	bx	lr

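@ _gcm_gmult_v8: r0 = Xi (input/output), r1 = Htable.
@ Multiplies the single 128-bit accumulator Xi by H in GF(2^128) and
@ reduces, using the precomputed twisted H from _gcm_init_v8.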
.globl	_gcm_gmult_v8
.private_extern	_gcm_gmult_v8
#ifdef __thumb2__
.thumb_func	_gcm_gmult_v8
#endif
.align	4
_gcm_gmult_v8:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr

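@ _gcm_ghash_v8: r0 = Xi (input/output), r1 = Htable, r2 = input,
@ r3 = length in bytes (a multiple of the 16-byte block size).
@ Hashes the input into Xi. Blocks are processed two at a time in the
@ modulo-scheduled loop below, combining (Xi^I[i])·H^2 with I[i+1]·H so
@ that only one reduction is needed per pair of blocks.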
.globl	_gcm_ghash_v8
.private_extern	_gcm_ghash_v8
#ifdef __thumb2__
.thumb_func	_gcm_ghash_v8
#endif
.align	4
_gcm_ghash_v8:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
					@ "[rotated]" means that
					@ loaded value would have
					@ to be rotated in order to
					@ make it appear as in
					@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
					@ increment for input pointer;
					@ as loop is modulo-scheduled
					@ r12 is zeroed just in time
					@ to preclude overstepping
					@ inp[len], which means that
					@ last block[s] are actually
					@ loaded twice, but last
					@ copy is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	Loop_mod2x_v8

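@ Main loop: each iteration consumes two input blocks. The I[i+1]·H
@ partial products (q4/q6/q5, started before the loop or at the bottom
@ of the previous iteration) are accumulated into the (Xi^I[i])·H^2
@ products ahead of the single two-phase reduction, and the next pair
@ of blocks is loaded early to hide load latency.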
.align	4
Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3	@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3	@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10	@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	Loop_mod2x_v8		@ there was at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	Ldone_v8		@ is r3 zero?
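@ Odd tail: exactly one 16-byte block remains; multiply it into Xi with
@ H alone and fall through to the final reduction.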
Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr

.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0	@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.align	2
.align	2
#endif  // !OPENSSL_NO_ASM