// blob c38134fe446896b835d4b8fc5928a9b26876ebfe (provenance note from the
// original code-review view; kept as a comment so the file assembles)
// GHASH (GCM polynomial multiplication) for AArch64 using the ARMv8
// Crypto Extensions (PMULL/PMULL2).  CRYPTOGAMS-derived code (see the
// banner .byte string at the end of the file); this flavor targets
// Mach-O / Apple platforms (leading-underscore symbols, .private_extern).
// Comment syntax note: in GNU AArch64 assembly, "//" starts a comment;
// ";" is a statement separator, not a comment.
#include <openssl/arm_arch.h>

.text

//-----------------------------------------------------------------------
// void gcm_init_v8(u128 Htable[], const uint64_t H[2])
//
// In:   x0 = Htable (output table), x1 = H (the GCM hash key)
// Out:  Htable[0] = "twisted" H, Htable[1] = packed Karatsuba
//       pre-processed halves, Htable[2] = twisted H^2
//       (only Htable[0..2] are written here — 48 bytes total)
// Uses: v0-v3, v16-v22; no general-purpose registers beyond x0/x1,
//       no stack.  Leaf function.
//
// The key is "twisted" (multiplied through by the reduction constant
// 0xc2...01 after the H<<=1 shift) so that the ghash/gmult routines can
// use a cheaper two-phase reduction.
//-----------------------------------------------------------------------
.globl	_gcm_init_v8
.private_extern	_gcm_init_v8

.align	4
_gcm_init_v8:
	ld1	{v17.2d},[x1]		//load input H
	movi	v19.16b,#0xe1
	shl	v19.2d,v19.2d,#57	//0xc2.0  (reduction polynomial)
	ext	v3.16b,v17.16b,v17.16b,#8
	ushr	v18.2d,v19.2d,#63
	dup	v17.4s,v17.s[1]
	ext	v16.16b,v18.16b,v19.16b,#8	//t0=0xc2....01
	ushr	v18.2d,v3.2d,#63
	sshr	v17.4s,v17.4s,#31	//broadcast carry bit
	and	v18.16b,v18.16b,v16.16b
	shl	v3.2d,v3.2d,#1
	ext	v18.16b,v18.16b,v18.16b,#8
	and	v16.16b,v16.16b,v17.16b
	orr	v3.16b,v3.16b,v18.16b	//H<<<=1
	eor	v20.16b,v3.16b,v16.16b	//twisted H
	st1	{v20.2d},[x0],#16	//store Htable[0]

	//calculate H^2 (one squaring via PMULL + Karatsuba + reduction)
	ext	v16.16b,v20.16b,v20.16b,#8	//Karatsuba pre-processing
	pmull	v0.1q,v20.1d,v20.1d
	eor	v16.16b,v16.16b,v20.16b
	pmull2	v2.1q,v20.2d,v20.2d
	pmull	v1.1q,v16.1d,v16.1d

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v22.16b,v0.16b,v18.16b	//twisted H^2

	ext	v17.16b,v22.16b,v22.16b,#8	//Karatsuba pre-processing
	eor	v17.16b,v17.16b,v22.16b
	ext	v21.16b,v16.16b,v17.16b,#8	//pack Karatsuba pre-processed
	st1	{v21.2d,v22.2d},[x0]	//store Htable[1..2]

	ret

//-----------------------------------------------------------------------
// void gcm_gmult_v8(uint64_t Xi[2], const u128 Htable[])
//
// In:   x0 = Xi (the 128-bit GHASH accumulator, read and written),
//       x1 = Htable as produced by gcm_init_v8
//       (Htable[0] = twisted H, Htable[1] = Karatsuba halves)
// Out:  Xi = Xi * H in GF(2^128), written back through x0
// Uses: v0-v3, v16-v21; leaf function, no stack, no GP clobbers.
//
// One Karatsuba PMULL multiply followed by the two-phase reduction by
// the 0xc2...01 polynomial.  rev64 handles the little-endian byte order
// (skipped when building big-endian, __ARMEB__).
//-----------------------------------------------------------------------
.globl	_gcm_gmult_v8
.private_extern	_gcm_gmult_v8

.align	4
_gcm_gmult_v8:
	ld1	{v17.2d},[x0]		//load Xi
	movi	v19.16b,#0xe1
	ld1	{v20.2d,v21.2d},[x1]	//load twisted H, ...
	shl	v19.2d,v19.2d,#57
#ifndef	__ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ext	v3.16b,v17.16b,v17.16b,#8

	pmull	v0.1q,v20.1d,v3.1d	//H.lo·Xi.lo
	eor	v17.16b,v17.16b,v3.16b	//Karatsuba pre-processing
	pmull2	v2.1q,v20.2d,v3.2d	//H.hi·Xi.hi
	pmull	v1.1q,v21.1d,v17.1d	//(H.lo+H.hi)·(Xi.lo+Xi.hi)

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase of reduction

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v0.16b,v0.16b,v18.16b

#ifndef	__ARMEB__
	rev64	v0.16b,v0.16b
#endif
	ext	v0.16b,v0.16b,v0.16b,#8
	st1	{v0.2d},[x0]		//write out Xi

	ret

//-----------------------------------------------------------------------
// void gcm_ghash_v8(uint64_t Xi[2], const u128 Htable[],
//                   const uint8_t *inp, size_t len)
//
// In:   x0 = Xi (accumulator, read and written), x1 = Htable from
//       gcm_init_v8 (twisted H, Karatsuba halves, twisted H^2),
//       x2 = input, x3 = length in bytes (presumably a multiple of 16 —
//       the code only ever consumes whole 16-byte blocks; confirm with
//       the C caller)
// Out:  Xi = GHASH of the input folded into the previous Xi
// Uses: v0-v7, v16-v22, x12; leaf function, no stack.
//
// Processes two blocks per iteration ("mod2x") using H^2, with a plain
// one-block tail.  The loop is modulo-scheduled: the next iteration's
// loads and H·Ii+1 products are issued inside the current iteration.
// x12 is the post-increment for the input pointer and is zeroed (csel)
// just before the final loads so the pointer never steps past inp[len];
// the last block(s) are therefore loaded twice but the extra copy is
// never processed.
//-----------------------------------------------------------------------
.globl	_gcm_ghash_v8
.private_extern	_gcm_ghash_v8

.align	4
_gcm_ghash_v8:
	ld1	{v0.2d},[x0]		//load [rotated] Xi
					//"[rotated]" means that
					//loaded value would have
					//to be rotated in order to
					//make it appear as in
					//algorithm specification
	subs	x3,x3,#32		//see if x3 is 32 or larger
	mov	x12,#16			//x12 is used as post-
					//increment for input pointer;
					//as loop is modulo-scheduled
					//x12 is zeroed just in time
					//to preclude overstepping
					//inp[len], which means that
					//last block[s] are actually
					//loaded twice, but last
					//copy is not processed
	ld1	{v20.2d,v21.2d},[x1],#32	//load twisted H, ..., H^2
	movi	v19.16b,#0xe1
	ld1	{v22.2d},[x1]
	csel	x12,xzr,x12,eq		//is it time to zero x12?
	ext	v0.16b,v0.16b,v0.16b,#8	//rotate Xi
	ld1	{v16.2d},[x2],#16	//load [rotated] I[0]
	shl	v19.2d,v19.2d,#57	//compose 0xc2.0 constant
#ifndef	__ARMEB__
	rev64	v16.16b,v16.16b
	rev64	v0.16b,v0.16b
#endif
	ext	v3.16b,v16.16b,v16.16b,#8	//rotate I[0]
	b.lo	Lodd_tail_v8		//x3 was less than 32
	//software pipeline prologue: start H·I[1] before entering the loop
	ld1	{v17.2d},[x2],x12	//load [rotated] I[1]
#ifndef	__ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ext	v7.16b,v17.16b,v17.16b,#8
	eor	v3.16b,v3.16b,v0.16b	//I[i]^=Xi
	pmull	v4.1q,v20.1d,v7.1d	//H·Ii+1
	eor	v17.16b,v17.16b,v7.16b	//Karatsuba pre-processing
	pmull2	v6.1q,v20.2d,v7.2d
	b	Loop_mod2x_v8

.align	4
Loop_mod2x_v8:
	ext	v18.16b,v3.16b,v3.16b,#8
	subs	x3,x3,#32		//is there more data?
	pmull	v0.1q,v22.1d,v3.1d	//H^2.lo·Xi.lo
	csel	x12,xzr,x12,lo		//is it time to zero x12?

	pmull	v5.1q,v21.1d,v17.1d
	eor	v18.16b,v18.16b,v3.16b	//Karatsuba pre-processing
	pmull2	v2.1q,v22.2d,v3.2d	//H^2.hi·Xi.hi
	eor	v0.16b,v0.16b,v4.16b	//accumulate
	pmull2	v1.1q,v21.2d,v18.2d	//(H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	ld1	{v16.2d},[x2],x12	//load [rotated] I[i+2]

	eor	v2.16b,v2.16b,v6.16b
	csel	x12,xzr,x12,eq		//is it time to zero x12?
	eor	v1.16b,v1.16b,v5.16b

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	ld1	{v17.2d},[x2],x12	//load [rotated] I[i+3]
#ifndef	__ARMEB__
	rev64	v16.16b,v16.16b
#endif
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase of reduction

#ifndef	__ARMEB__
	rev64	v17.16b,v17.16b
#endif
	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	ext	v7.16b,v17.16b,v17.16b,#8
	ext	v3.16b,v16.16b,v16.16b,#8
	eor	v0.16b,v1.16b,v18.16b
	pmull	v4.1q,v20.1d,v7.1d	//H·Ii+1
	eor	v3.16b,v3.16b,v2.16b	//accumulate v3.16b early

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v3.16b,v3.16b,v18.16b
	eor	v17.16b,v17.16b,v7.16b	//Karatsuba pre-processing
	eor	v3.16b,v3.16b,v0.16b
	pmull2	v6.1q,v20.2d,v7.2d
	b.hs	Loop_mod2x_v8		//there was at least 32 more bytes

	//undo the speculative early-accumulation done inside the loop
	eor	v2.16b,v2.16b,v18.16b
	ext	v3.16b,v16.16b,v16.16b,#8	//re-construct v3.16b
	adds	x3,x3,#32		//re-construct x3
	eor	v0.16b,v0.16b,v2.16b	//re-construct v0.16b
	b.eq	Ldone_v8		//is x3 zero?
Lodd_tail_v8:
	//single trailing block: same multiply/reduce as gcm_gmult_v8
	ext	v18.16b,v0.16b,v0.16b,#8
	eor	v3.16b,v3.16b,v0.16b	//inp^=Xi
	eor	v17.16b,v16.16b,v18.16b	//v17.16b is rotated inp^Xi

	pmull	v0.1q,v20.1d,v3.1d	//H.lo·Xi.lo
	eor	v17.16b,v17.16b,v3.16b	//Karatsuba pre-processing
	pmull2	v2.1q,v20.2d,v3.2d	//H.hi·Xi.hi
	pmull	v1.1q,v21.1d,v17.1d	//(H.lo+H.hi)·(Xi.lo+Xi.hi)

	ext	v17.16b,v0.16b,v2.16b,#8	//Karatsuba post-processing
	eor	v18.16b,v0.16b,v2.16b
	eor	v1.16b,v1.16b,v17.16b
	eor	v1.16b,v1.16b,v18.16b
	pmull	v18.1q,v0.1d,v19.1d	//1st phase of reduction

	ins	v2.d[0],v1.d[1]
	ins	v1.d[1],v0.d[0]
	eor	v0.16b,v1.16b,v18.16b

	ext	v18.16b,v0.16b,v0.16b,#8	//2nd phase of reduction
	pmull	v0.1q,v0.1d,v19.1d
	eor	v18.16b,v18.16b,v2.16b
	eor	v0.16b,v0.16b,v18.16b

Ldone_v8:
#ifndef	__ARMEB__
	rev64	v0.16b,v0.16b
#endif
	ext	v0.16b,v0.16b,v0.16b,#8
	st1	{v0.2d},[x0]		//write out Xi

	ret

//ASCII: "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>\0"
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2