#if defined(__has_feature)
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(BORINGSSL_PREFIX)
#include <boringssl_prefix_symbols_asm.h>
#endif
#include <openssl/arm_arch.h>

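// GHASH for ARMv8 using the PMULL/PMULL2 crypto extensions. This appears
// to be the Apple/AArch64 flavor of the CRYPTOGAMS ghashv8-armx output;
// the polynomial multiplies below implement multiplication in GF(2^128)
// modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1, using the
// bit-reflected ("twisted") representation conventional for GCM.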
.text

.globl	_gcm_init_v8
.private_extern	_gcm_init_v8

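// void gcm_init_v8(u128 Htable[16], const uint64_t H[2])
//
// Contract assumed here, following BoringSSL's GHASH interface: x0 points
// at Htable and x1 at the hash key H. The routine stores the twisted H in
// Htable[0], the packed Karatsuba pre-processed halves in Htable[1] and
// H^2 in Htable[2]; the remaining slots are left untouched.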
.align	4
_gcm_init_v8:
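	// "Twisting" H: in the bit-reflected representation H is multiplied
	// by x up front (a left shift by 1 with carry propagation and a
	// conditional reduction). v19 holds the reduction constant: each
	// 64-bit lane of 0xe1 bytes shifted left by 57 gives 0xc2 in the top
	// byte, the reflected form of the GCM polynomial.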
	ld1	{v17.2d},[x1]		//load input H
	movi	v19.16b,#0xe1
	shl	v19.2d,v19.2d,#57	//0xc2.0
	ext	v3.16b,v17.16b,v17.16b,#8
	ushr	v18.2d,v19.2d,#63
	dup	v17.4s,v17.s[1]
	ext	v16.16b,v18.16b,v19.16b,#8	//t0=0xc2....01
	ushr	v18.2d,v3.2d,#63
	sshr	v17.4s,v17.4s,#31	//broadcast carry bit
	and	v18.16b,v18.16b,v16.16b
	shl	v3.2d,v3.2d,#1
	ext	v18.16b,v18.16b,v18.16b,#8
	and	v16.16b,v16.16b,v17.16b
	orr	v3.16b,v3.16b,v18.16b	//H<<<=1
	eor	v20.16b,v3.16b,v16.16b	//twisted H
	st1	{v20.2d},[x0],#16	//store Htable[0]

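	// H^2 is computed so that the main ghash loop can fold two blocks
	// per iteration. The squaring uses the same three-multiplication
	// Karatsuba scheme and two-phase reduction as the multiplies below.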
	//calculate H^2
	ext	v16.16b,v20.16b,v20.16b,#8	//Karatsuba pre-processing
	pmull	v0.1q,v20.1d,v20.1d
	eor	v16.16b,v16.16b,v20.16b
	pmull2	v2.1q,v20.2d,v20.2d
	pmull	v1.1q,v16.1d,v16.1d

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
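	// Reduction: the 256-bit Karatsuba result in v2:v0 is reduced modulo
	// x^128 + x^7 + x^2 + x + 1 in two phases, each a pmull by the
	// 0xc2...01 constant in v19 followed by a fold, mirroring the usual
	// bit-reflected GCM reduction.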
	pmull	v18.1q,v0.1d,v19.1d	//1st phase

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v22.16b,v0.16b,v18.16b

	ext	v17.16b,v22.16b,v22.16b,#8	//Karatsuba pre-processing
	eor	v17.16b,v17.16b,v22.16b
	ext	v21.16b,v16.16b,v17.16b,#8	//pack Karatsuba pre-processed
	st1	{v21.2d,v22.2d},[x0]	//store Htable[1..2]

	ret

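// void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[16])
//
// Multiplies the single block Xi (pointed to by x0) by H and writes the
// result back to Xi; the argument layout assumed here matches BoringSSL's
// gcm_gmult_v8 declaration.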
.globl	_gcm_gmult_v8
.private_extern	_gcm_gmult_v8

.align	4
_gcm_gmult_v8:
	ld1	{v17.2d},[x0]		//load Xi
	movi	v19.16b,#0xe1
	ld1	{v20.2d,v21.2d},[x1]	//load twisted H, ...
	shl	v19.2d,v19.2d,#57
#ifndef	__ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ext	v3.16b,v17.16b,v17.16b,#8

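	// One 128x128 carry-less multiply via Karatsuba: three 64x64 pmulls
	// (lo·lo, hi·hi and the cross term) instead of four, with the cross
	// term fixed up by xoring in the two outer products afterwards.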
	pmull	v0.1q,v20.1d,v3.1d	//H.lo·Xi.lo
	eor	v17.16b,v17.16b,v3.16b	//Karatsuba pre-processing
	pmull2	v2.1q,v20.2d,v3.2d	//H.hi·Xi.hi
	pmull	v1.1q,v21.1d,v17.1d	//(H.lo+H.hi)·(Xi.lo+Xi.hi)

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase of reduction

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v0.16b,v0.16b,v18.16b

#ifndef	__ARMEB__
	rev64	v0.16b,v0.16b
#endif
	ext	v0.16b,v0.16b,v0.16b,#8
	st1	{v0.2d},[x0]		//write out Xi

	ret

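// void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[16],
//                   const uint8_t *inp, size_t len)
//
// Folds len bytes of input into the running hash Xi; len is assumed to be
// a multiple of the 16-byte block size, per BoringSSL's GHASH interface.
// The main loop consumes two blocks per iteration using H^2, with a
// single-block tail for odd block counts.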
.globl	_gcm_ghash_v8
.private_extern	_gcm_ghash_v8

.align	4
_gcm_ghash_v8:
	ld1	{v0.2d},[x0]		//load [rotated] Xi
					//"[rotated]" means that the
					//loaded value would have to
					//be rotated in order to make
					//it appear as in the
					//algorithm specification
	subs	x3,x3,#32		//see if x3 is 32 or larger
	mov	x12,#16			//x12 is used as post-
					//increment for input pointer;
					//as the loop is modulo-scheduled
					//x12 is zeroed just in time
					//to preclude overstepping
					//inp[len], which means that
					//the last block[s] are actually
					//loaded twice, but the last
					//copy is not processed
	ld1	{v20.2d,v21.2d},[x1],#32	//load twisted H, ..., H^2
	movi	v19.16b,#0xe1
	ld1	{v22.2d},[x1]
	csel	x12,xzr,x12,eq		//is it time to zero x12?
	ext	v0.16b,v0.16b,v0.16b,#8	//rotate Xi
	ld1	{v16.2d},[x2],#16	//load [rotated] I[0]
	shl	v19.2d,v19.2d,#57	//compose 0xc2.0 constant
#ifndef	__ARMEB__
	rev64	v16.16b,v16.16b
	rev64	v0.16b,v0.16b
#endif
	ext	v3.16b,v16.16b,v16.16b,#8	//rotate I[0]
	b.lo	Lodd_tail_v8		//x3 was less than 32
	ld1	{v17.2d},[x2],x12	//load [rotated] I[1]
#ifndef	__ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ext	v7.16b,v17.16b,v17.16b,#8
	eor	v3.16b,v3.16b,v0.16b	//I[i]^=Xi
	pmull	v4.1q,v20.1d,v7.1d	//H·Ii+1
	eor	v17.16b,v17.16b,v7.16b	//Karatsuba pre-processing
	pmull2	v6.1q,v20.2d,v7.2d
	b	Loop_mod2x_v8

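	// Aggregated two-block step: with the Xi-adjusted even block in v3
	// and the odd block's H products pre-computed in v4/v6, each
	// iteration effectively evaluates
	// Xi = (Xi ^ I[i])·H^2 ^ I[i+1]·H, then reduces.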
.align	4
Loop_mod2x_v8:
	ext	v18.16b,v3.16b,v3.16b,#8
	subs	x3,x3,#32		//is there more data?
	pmull	v0.1q,v22.1d,v3.1d	//H^2.lo·Xi.lo
	csel	x12,xzr,x12,lo		//is it time to zero x12?

	pmull	v5.1q,v21.1d,v17.1d
	eor	v18.16b,v18.16b,v3.16b	//Karatsuba pre-processing
	pmull2	v2.1q,v22.2d,v3.2d	//H^2.hi·Xi.hi
	eor	v0.16b,v0.16b,v4.16b	//accumulate
	pmull2	v1.1q,v21.2d,v18.2d	//(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	ld1	{v16.2d},[x2],x12	//load [rotated] I[i+2]

	eor	v2.16b,v2.16b,v6.16b
	csel	x12,xzr,x12,eq		//is it time to zero x12?
	eor	v1.16b,v1.16b,v5.16b

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	ld1	{v17.2d},[x2],x12	//load [rotated] I[i+3]
#ifndef	__ARMEB__
	rev64	v16.16b,v16.16b
#endif
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase of reduction

#ifndef	__ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	ext	v7.16b,v17.16b,v17.16b,#8
	ext	v3.16b,v16.16b,v16.16b,#8
	eor	v0.16b,v1.16b,v18.16b
	pmull	v4.1q,v20.1d,v7.1d	//H·Ii+1
	eor	v3.16b,v3.16b,v2.16b	//accumulate v3.16b early

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v3.16b,v3.16b,v18.16b
	eor	v17.16b,v17.16b,v7.16b	//Karatsuba pre-processing
	eor	v3.16b,v3.16b,v0.16b
	pmull2	v6.1q,v20.2d,v7.2d
	b.hs	Loop_mod2x_v8		//there were at least 32 more bytes

	eor	v2.16b,v2.16b,v18.16b
	ext	v3.16b,v16.16b,v16.16b,#8	//re-construct v3.16b
	adds	x3,x3,#32		//re-construct x3
	eor	v0.16b,v0.16b,v2.16b	//re-construct v0.16b
	b.eq	Ldone_v8		//is x3 zero?
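	// One trailing block remains (or len was exactly 16): fold it in
	// with a single ordinary multiply by H, identical in structure to
	// gcm_gmult_v8 above.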
Lodd_tail_v8:
	ext	v18.16b,v0.16b,v0.16b,#8
	eor	v3.16b,v3.16b,v0.16b	//inp^=Xi
	eor	v17.16b,v16.16b,v18.16b	//v17.16b is rotated inp^Xi

	pmull	v0.1q,v20.1d,v3.1d	//H.lo·Xi.lo
	eor	v17.16b,v17.16b,v3.16b	//Karatsuba pre-processing
	pmull2	v2.1q,v20.2d,v3.2d	//H.hi·Xi.hi
	pmull	v1.1q,v21.1d,v17.1d	//(H.lo+H.hi)·(Xi.lo+Xi.hi)

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase of reduction

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v0.16b,v0.16b,v18.16b

Ldone_v8:
#ifndef	__ARMEB__
	rev64	v0.16b,v0.16b
#endif
	ext	v0.16b,v0.16b,v0.16b,#8
	st1	{v0.2d},[x0]		//write out Xi

	ret

//"GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif  // !OPENSSL_NO_ASM