#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA512 block transform for x86. September 2007.
#
# May 2013.
#
# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm	SIMD(*)	x86_64(**)
# Pentium	100	97	61	-	-
# PIII		75	77	56	-	-
# P4		116	95	82	34.6	30.8
# AMD K8	54	55	36	20.7	9.57
# Core2		66	57	40	15.9	9.97
# Westmere	70	-	38	12.2	9.58
# Sandy Bridge	58	-	35	11.9	11.2
# Ivy Bridge	50	-	33	11.5	8.17
# Haswell	46	-	29	11.3	7.66
# Skylake	40	-	26	13.3	7.25
# Bulldozer	121	-	50	14.0	13.5
# VIA Nano	91	-	52	33	14.7
# Atom		126	-	68	48(***)	14.7
# Silvermont	97	-	58	42(***)	17.5
# Goldmont	80	-	48	19.5	12.0
#
# (*)	whichever is best applicable.
# (**)	x86_64 assembler performance is presented for reference
#	purposes, the results are for integer-only code.
# (***)	paddq is incredibly slow on Atom.
#
# The IALU code-path is optimized for elder Pentiums. On vanilla Pentium
# the performance improvement over compiler-generated code reaches ~60%,
# while on PIII - ~35%. On newer µ-archs the improvement varies from 15%
# to 50%, but it's less important, as they are expected to execute the
# SSE2 code-path, which is commonly ~2-3x faster [than compiler-generated
# code]. The SSE2 code-path is as fast as the original sha512-sse2.pl,
# even though it does not use 128-bit operations, which means that an
# SSE2-aware kernel is no longer required to execute the code. Another
# difference is that the new code optimizes the amount of writes, but at
# the cost of increasing the data cache "footprint" by 1/2KB.

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../../perlasm");
require "x86asm.pl";

$output=pop;
open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }

&external_label("OPENSSL_ia32cap_P") if ($sse2);

$Tlo=&DWP(0,"esp");	$Thi=&DWP(4,"esp");
$Alo=&DWP(8,"esp");	$Ahi=&DWP(8+4,"esp");
$Blo=&DWP(16,"esp");	$Bhi=&DWP(16+4,"esp");
$Clo=&DWP(24,"esp");	$Chi=&DWP(24+4,"esp");
$Dlo=&DWP(32,"esp");	$Dhi=&DWP(32+4,"esp");
$Elo=&DWP(40,"esp");	$Ehi=&DWP(40+4,"esp");
$Flo=&DWP(48,"esp");	$Fhi=&DWP(48+4,"esp");
$Glo=&DWP(56,"esp");	$Ghi=&DWP(56+4,"esp");
$Hlo=&DWP(64,"esp");	$Hhi=&DWP(64+4,"esp");
$K512="ebp";

$Asse2=&QWP(0,"esp");
$Bsse2=&QWP(8,"esp");
$Csse2=&QWP(16,"esp");
$Dsse2=&QWP(24,"esp");
$Esse2=&QWP(32,"esp");
$Fsse2=&QWP(40,"esp");
$Gsse2=&QWP(48,"esp");
$Hsse2=&QWP(56,"esp");

$A="mm0";	# B-D and
$E="mm4";	# F-H are commonly loaded to respectively mm1-mm3 and
		# mm5-mm7, but it's done on an on-demand basis...
$BxC="mm2";	# ... except for B^C
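#
# Keeping B^C live lets Maj() be computed with one AND and two XORs via
# the boolean identity (stated here for clarity):
#
#	Maj(a,b,c) = b ^ ((a^b) & (b^c))
#
# which is exactly the pand/pxor pair tagged "[h=]Maj(a,b,c)" below:
# when a==b the AND term vanishes and the result is b, otherwise it
# collapses to c - the majority in either case.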

sub BODY_00_15_sse2 {
    my $phase=shift;

	#&movq	("mm5",$Fsse2);			# load f
	#&movq	("mm6",$Gsse2);			# load g

	&movq	("mm1",$E);			# %mm1 is sliding right
	&pxor	("mm5","mm6");			# f^=g
	&psrlq	("mm1",14);
	&movq	($Esse2,$E);			# modulo-scheduled save e
	&pand	("mm5",$E);			# f&=e
	&psllq	($E,23);			# $E is sliding left
	&movq	($A,"mm3") if ($phase<2);
	&movq	(&QWP(8*9,"esp"),"mm7");	# save X[i]
	&movq	("mm3","mm1");			# %mm3 is T1
	&psrlq	("mm1",4);
	&pxor	("mm5","mm6");			# Ch(e,f,g)
	&pxor	("mm3",$E);
	&psllq	($E,23);
	&pxor	("mm3","mm1");
	&movq	($Asse2,$A);			# modulo-scheduled save a
	&paddq	("mm7","mm5");			# X[i]+=Ch(e,f,g)
	&pxor	("mm3",$E);
	&psrlq	("mm1",23);
	&paddq	("mm7",$Hsse2);			# X[i]+=h
	&pxor	("mm3","mm1");
	&psllq	($E,4);
	&paddq	("mm7",&QWP(0,$K512));		# X[i]+=K512[i]
	&pxor	("mm3",$E);			# T1=Sigma1_512(e)

	&movq	($E,$Dsse2);			# e = load d, e in next round
	&paddq	("mm3","mm7");			# T1+=X[i]
	&movq	("mm5",$A);			# %mm5 is sliding right
	&psrlq	("mm5",28);
	&paddq	($E,"mm3");			# d += T1
	&movq	("mm6",$A);			# %mm6 is sliding left
	&movq	("mm7","mm5");
	&psllq	("mm6",25);
	&movq	("mm1",$Bsse2);			# load b
	&psrlq	("mm5",6);
	&pxor	("mm7","mm6");
	&sub	("esp",8);
	&psllq	("mm6",5);
	&pxor	("mm7","mm5");
	&pxor	($A,"mm1");			# a^b, b^c in next round
	&psrlq	("mm5",5);
	&pxor	("mm7","mm6");
	&pand	($BxC,$A);			# (b^c)&(a^b)
	&psllq	("mm6",6);
	&pxor	("mm7","mm5");
	&pxor	($BxC,"mm1");			# [h=]Maj(a,b,c)
	&pxor	("mm6","mm7");			# Sigma0_512(a)
	&movq	("mm7",&QWP(8*(9+16-1),"esp")) if ($phase!=0);	# pre-fetch
	&movq	("mm5",$Fsse2) if ($phase==0);	# load f

    if ($phase>1) {
	&paddq	($BxC,"mm6");			# h+=Sigma0(a)
	&add	($K512,8);
	#&paddq	($BxC,"mm3");			# h+=T1

	($A,$BxC) = ($BxC,$A);			# rotate registers
    } else {
	&paddq	("mm3",$BxC);			# T1+=Maj(a,b,c)
	&movq	($BxC,$A);
	&add	($K512,8);
	&paddq	("mm3","mm6");			# T1+=Sigma0(a)
	&movq	("mm6",$Gsse2) if ($phase==0);	# load g
	#&movq	($A,"mm3");			# h=T1
    }
}
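
# The three phases above are software-pipelined variants of one and the
# same FIPS 180-4 round. A minimal C model of a single round (reference
# only; the assembly fuses T1 directly into the d and h updates):
#
#	T1 = h + Sigma1(e) + Ch(e,f,g) + K512[i] + X[i];
#	T2 = Sigma0(a) + Maj(a,b,c);
#	h = g; g = f; f = e; e = d + T1;
#	d = c; c = b; b = a; a = T1 + T2;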

sub BODY_00_15_x86 {
	#define Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
	#	LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
	#	HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
	&mov	("ecx",$Elo);
	&mov	("edx",$Ehi);
	&mov	("esi","ecx");

	&shr	("ecx",9);		# lo>>9
	&mov	("edi","edx");
	&shr	("edx",9);		# hi>>9
	&mov	("ebx","ecx");
	&shl	("esi",14);		# lo<<14
	&mov	("eax","edx");
	&shl	("edi",14);		# hi<<14
	&xor	("ebx","esi");

	&shr	("ecx",14-9);		# lo>>14
	&xor	("eax","edi");
	&shr	("edx",14-9);		# hi>>14
	&xor	("eax","ecx");
	&shl	("esi",18-14);		# lo<<18
	&xor	("ebx","edx");
	&shl	("edi",18-14);		# hi<<18
	&xor	("ebx","esi");

	&shr	("ecx",18-14);		# lo>>18
	&xor	("eax","edi");
	&shr	("edx",18-14);		# hi>>18
	&xor	("eax","ecx");
	&shl	("esi",23-18);		# lo<<23
	&xor	("ebx","edx");
	&shl	("edi",23-18);		# hi<<23
	&xor	("eax","esi");
	&xor	("ebx","edi");		# T1 = Sigma1(e)

	&mov	("ecx",$Flo);
	&mov	("edx",$Fhi);
	&mov	("esi",$Glo);
	&mov	("edi",$Ghi);
	&add	("eax",$Hlo);
	&adc	("ebx",$Hhi);		# T1 += h
	&xor	("ecx","esi");
	&xor	("edx","edi");
	&and	("ecx",$Elo);
	&and	("edx",$Ehi);
	&add	("eax",&DWP(8*(9+15)+0,"esp"));
	&adc	("ebx",&DWP(8*(9+15)+4,"esp"));	# T1 += X[0]
	&xor	("ecx","esi");
	&xor	("edx","edi");		# Ch(e,f,g) = ((f^g)&e)^g

	&mov	("esi",&DWP(0,$K512));
	&mov	("edi",&DWP(4,$K512));	# K[i]
	&add	("eax","ecx");
	&adc	("ebx","edx");		# T1 += Ch(e,f,g)
	&mov	("ecx",$Dlo);
	&mov	("edx",$Dhi);
	&add	("eax","esi");
	&adc	("ebx","edi");		# T1 += K[i]
	&mov	($Tlo,"eax");
	&mov	($Thi,"ebx");		# put T1 away
	&add	("eax","ecx");
	&adc	("ebx","edx");		# d += T1

	#define Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
	#	LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
	#	HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
	&mov	("ecx",$Alo);
	&mov	("edx",$Ahi);
	&mov	($Dlo,"eax");
	&mov	($Dhi,"ebx");
	&mov	("esi","ecx");

	&shr	("ecx",2);		# lo>>2
	&mov	("edi","edx");
	&shr	("edx",2);		# hi>>2
	&mov	("ebx","ecx");
	&shl	("esi",4);		# lo<<4
	&mov	("eax","edx");
	&shl	("edi",4);		# hi<<4
	&xor	("ebx","esi");

	&shr	("ecx",7-2);		# lo>>7
	&xor	("eax","edi");
	&shr	("edx",7-2);		# hi>>7
	&xor	("ebx","ecx");
	&shl	("esi",25-4);		# lo<<25
	&xor	("eax","edx");
	&shl	("edi",25-4);		# hi<<25
	&xor	("eax","esi");

	&shr	("ecx",28-7);		# lo>>28
	&xor	("ebx","edi");
	&shr	("edx",28-7);		# hi>>28
	&xor	("eax","ecx");
	&shl	("esi",30-25);		# lo<<30
	&xor	("ebx","edx");
	&shl	("edi",30-25);		# hi<<30
	&xor	("eax","esi");
	&xor	("ebx","edi");		# Sigma0(a)

	&mov	("ecx",$Alo);
	&mov	("edx",$Ahi);
	&mov	("esi",$Blo);
	&mov	("edi",$Bhi);
	&add	("eax",$Tlo);
	&adc	("ebx",$Thi);		# T1 = Sigma0(a)+T1
	&or	("ecx","esi");
	&or	("edx","edi");
	&and	("ecx",$Clo);
	&and	("edx",$Chi);
	&and	("esi",$Alo);
	&and	("edi",$Ahi);
	&or	("ecx","esi");
	&or	("edx","edi");		# Maj(a,b,c) = ((a|b)&c)|(a&b)

	&add	("eax","ecx");
	&adc	("ebx","edx");		# T1 += Maj(a,b,c)
	&mov	($Tlo,"eax");
	&mov	($Thi,"ebx");

	&mov	(&LB("edx"),&BP(0,$K512));	# pre-fetch LSB of *K
	&sub	("esp",8);
	&lea	($K512,&DWP(8,$K512));	# K++
}
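
# The LO/HI comment pairs above come from splitting a 64-bit rotate into
# 32-bit halves. For a rotate amount 0 < n < 32 (reference identity):
#
#	ROTR64(x,n).lo = (lo >> n) | (hi << (32-n))
#	ROTR64(x,n).hi = (hi >> n) | (lo << (32-n))
#
# and for 32 < n < 64 the halves swap roles with amount n-32, which is
# why e.g. ROTR 41 appears as hi>>9^lo<<23 / lo>>9^hi<<23.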
&function_begin("sha512_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K512);
	&lea	($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
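	# Classic 32-bit PIC idiom: the call pushes the runtime address of
	# "pic_point", blindpop() recovers it into $K512, and the lea then
	# adds the assemble-time offset K512-pic_point, yielding the runtime
	# address of the K512 table without any relocation.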

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",7);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*128
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp

if ($sse2) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
	&mov	("ecx",&DWP(0,"edx"));
	&test	("ecx",1<<26);
	&jz	(&label("loop_x86"));

	&mov	("edx",&DWP(4,"edx"));

	# load ctx->h[0-7]
	&movq	($A,&QWP(0,"esi"));
	&and	("ecx",1<<24);		# XMM registers availability
	&movq	("mm1",&QWP(8,"esi"));
	&and	("edx",1<<9);		# SSSE3 bit
	&movq	($BxC,&QWP(16,"esi"));
	&or	("ecx","edx");
	&movq	("mm3",&QWP(24,"esi"));
	&movq	($E,&QWP(32,"esi"));
	&movq	("mm5",&QWP(40,"esi"));
	&movq	("mm6",&QWP(48,"esi"));
	&movq	("mm7",&QWP(56,"esi"));
	&cmp	("ecx",1<<24|1<<9);
	&je	(&label("SSSE3"));
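	# Dispatch rationale (informational): word 0 of OPENSSL_ia32cap_P
	# mirrors CPUID.1:EDX, where bit 26 is SSE2 and bit 24 (FXSR) gates
	# XMM register availability; word 1 mirrors CPUID.1:ECX, where bit 9
	# is SSSE3. Only when both XMM and SSSE3 are present is the SSSE3
	# path taken; plain SSE2 otherwise, with pure IALU as the fallback.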
	&sub	("esp",8*10);
	&jmp	(&label("loop_sse2"));

&set_label("loop_sse2",16);
	#&movq	($Asse2,$A);
	&movq	($Bsse2,"mm1");
	&movq	($Csse2,$BxC);
	&movq	($Dsse2,"mm3");
	#&movq	($Esse2,$E);
	&movq	($Fsse2,"mm5");
	&movq	($Gsse2,"mm6");
	&pxor	($BxC,"mm1");		# magic
	&movq	($Hsse2,"mm7");
	&movq	("mm3",$A);		# magic

	&mov	("eax",&DWP(0,"edi"));
	&mov	("ebx",&DWP(4,"edi"));
	&add	("edi",8);
	&mov	("edx",15);		# counter
	&bswap	("eax");
	&bswap	("ebx");
	&jmp	(&label("00_14_sse2"));

&set_label("00_14_sse2",16);
	&movd	("mm1","eax");
	&mov	("eax",&DWP(0,"edi"));
	&movd	("mm7","ebx");
	&mov	("ebx",&DWP(4,"edi"));
	&add	("edi",8);
	&bswap	("eax");
	&bswap	("ebx");
	&punpckldq("mm7","mm1");

	&BODY_00_15_sse2();

	&dec	("edx");
	&jnz	(&label("00_14_sse2"));

	&movd	("mm1","eax");
	&movd	("mm7","ebx");
	&punpckldq("mm7","mm1");

	&BODY_00_15_sse2(1);

	&pxor	($A,$A);		# A is in %mm3
	&mov	("edx",32);		# counter
	&jmp	(&label("16_79_sse2"));

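	# Message schedule reference (same definitions as in the x86 path
	# below, repeated here for readability):
	#define sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
	#define sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
	# X[i] = sigma1(X[i-2]) + X[i-7] + sigma0(X[i-15]) + X[i-16]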
&set_label("16_79_sse2",16);
    for ($j=0;$j<2;$j++) {			# 2x unroll
	#&movq	("mm7",&QWP(8*(9+16-1),"esp"));	# prefetched in BODY_00_15
	&movq	("mm5",&QWP(8*(9+16-14),"esp"));
	&movq	("mm1","mm7");
	&psrlq	("mm7",1);
	&movq	("mm6","mm5");
	&psrlq	("mm5",6);
	&psllq	("mm1",56);
	&paddq	($A,"mm3");			# from BODY_00_15
	&movq	("mm3","mm7");
	&psrlq	("mm7",7-1);
	&pxor	("mm3","mm1");
	&psllq	("mm1",63-56);
	&pxor	("mm3","mm7");
	&psrlq	("mm7",8-7);
	&pxor	("mm3","mm1");
	&movq	("mm1","mm5");
	&psrlq	("mm5",19-6);
	&pxor	("mm7","mm3");			# sigma0

	&psllq	("mm6",3);
	&pxor	("mm1","mm5");
	&paddq	("mm7",&QWP(8*(9+16),"esp"));
	&pxor	("mm1","mm6");
	&psrlq	("mm5",61-19);
	&paddq	("mm7",&QWP(8*(9+16-9),"esp"));
	&pxor	("mm1","mm5");
	&psllq	("mm6",45-3);
	&movq	("mm5",$Fsse2);			# load f
	&pxor	("mm1","mm6");			# sigma1
	&movq	("mm6",$Gsse2);			# load g

	&paddq	("mm7","mm1");			# X[i]
	#&movq	(&QWP(8*9,"esp"),"mm7");	# moved to BODY_00_15

	&BODY_00_15_sse2(2);
    }
	&dec	("edx");
	&jnz	(&label("16_79_sse2"));

	#&movq	($A,$Asse2);
	&paddq	($A,"mm3");		# from BODY_00_15
	&movq	("mm1",$Bsse2);
	#&movq	($BxC,$Csse2);
	&movq	("mm3",$Dsse2);
	#&movq	($E,$Esse2);
	&movq	("mm5",$Fsse2);
	&movq	("mm6",$Gsse2);
	&movq	("mm7",$Hsse2);

	&pxor	($BxC,"mm1");		# de-magic
	&paddq	($A,&QWP(0,"esi"));
	&paddq	("mm1",&QWP(8,"esi"));
	&paddq	($BxC,&QWP(16,"esi"));
	&paddq	("mm3",&QWP(24,"esi"));
	&paddq	($E,&QWP(32,"esi"));
	&paddq	("mm5",&QWP(40,"esi"));
	&paddq	("mm6",&QWP(48,"esi"));
	&paddq	("mm7",&QWP(56,"esi"));

	&mov	("eax",8*80);
	&movq	(&QWP(0,"esi"),$A);
	&movq	(&QWP(8,"esi"),"mm1");
	&movq	(&QWP(16,"esi"),$BxC);
	&movq	(&QWP(24,"esi"),"mm3");
	&movq	(&QWP(32,"esi"),$E);
	&movq	(&QWP(40,"esi"),"mm5");
	&movq	(&QWP(48,"esi"),"mm6");
	&movq	(&QWP(56,"esi"),"mm7");

	&lea	("esp",&DWP(0,"esp","eax"));	# destroy frame
	&sub	($K512,"eax");			# rewind K

	&cmp	("edi",&DWP(8*10+8,"esp"));	# are we done yet?
	&jb	(&label("loop_sse2"));

	&mov	("esp",&DWP(8*10+12,"esp"));	# restore sp
	&emms	();
&function_end_A();

&set_label("SSSE3",32);
{ my ($cnt,$frame)=("ecx","edx");
  my @X=map("xmm$_",(0..7));
  my $j;
  my $i=0;

	&lea	($frame,&DWP(-64,"esp"));
	&sub	("esp",256);

	# fixed stack frame layout
	#
	# +0	A B C D E F G H		# backing store
	# +64	X[0]+K[i] .. X[15]+K[i]	# XMM->MM xfer area
	# +192				# XMM off-load ring buffer
	# +256				# saved parameters
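	#
	# Division of labour (as implemented below): the XMM unit runs
	# ahead computing X[i]+K[i] two qwords at a time and parks the
	# results in the xfer area, while the MMX round engine consumes
	# them; the ring buffer keeps the last four XMM registers
	# off-loaded so @X can be rotated without spilling to ctx.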

	&movdqa	(@X[1],&QWP(80*8,$K512));		# byte swap mask
	&movdqu	(@X[0],&QWP(0,"edi"));
	&pshufb	(@X[0],@X[1]);
    for ($j=0;$j<8;$j++) {
	&movdqa	(&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4);	# off-load
	&movdqa	(@X[3],&QWP(16*($j%8),$K512));
	&movdqa	(@X[2],@X[1]) if ($j<7);		# perpetuate byte swap mask
	&movdqu	(@X[1],&QWP(16*($j+1),"edi")) if ($j<7);	# next input
	&movdqa	(@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);	# restore @X[0]
	&paddq	(@X[3],@X[0]);
	&pshufb	(@X[1],@X[2]) if ($j<7);
	&movdqa	(&QWP(16*($j%8)-128,$frame),@X[3]);	# xfer X[i]+K[i]

	push(@X,shift(@X));				# rotate(@X)
    }
	#&jmp	(&label("loop_ssse3"));
	&nop	();

&set_label("loop_ssse3",32);
	&movdqa	(@X[2],&QWP(16*(($j+1)%4),$frame));	# pre-restore @X[1]
	&movdqa	(&QWP(16*(($j-1)%4),$frame),@X[3]);	# off-load @X[3]
	&lea	($K512,&DWP(16*8,$K512));

	#&movq	($Asse2,$A);			# off-load A-H
	&movq	($Bsse2,"mm1");
	&mov	("ebx","edi");
	&movq	($Csse2,$BxC);
	&lea	("edi",&DWP(128,"edi"));	# advance input
	&movq	($Dsse2,"mm3");
	&cmp	("edi","eax");
	#&movq	($Esse2,$E);
	&movq	($Fsse2,"mm5");
	&cmovb	("ebx","edi");
	&movq	($Gsse2,"mm6");
	&mov	("ecx",4);			# loop counter
	&pxor	($BxC,"mm1");			# magic
	&movq	($Hsse2,"mm7");
	&pxor	("mm3","mm3");			# magic

	&jmp	(&label("00_47_ssse3"));

sub BODY_00_15_ssse3 {		# "phase-less" copy of BODY_00_15_sse2
	(
	'&movq	("mm1",$E)',				# %mm1 is sliding right
	'&movq	("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
	'&pxor	("mm5","mm6")',				# f^=g
	'&psrlq	("mm1",14)',
	'&movq	(&QWP(8*($i+4)%64,"esp"),$E)',		# modulo-scheduled save e
	'&pand	("mm5",$E)',				# f&=e
	'&psllq	($E,23)',				# $E is sliding left
	'&paddq	($A,"mm3")',				# [h+=Maj(a,b,c)]
	'&movq	("mm3","mm1")',				# %mm3 is T1
	'&psrlq	("mm1",4)',
	'&pxor	("mm5","mm6")',				# Ch(e,f,g)
	'&pxor	("mm3",$E)',
	'&psllq	($E,23)',
	'&pxor	("mm3","mm1")',
	'&movq	(&QWP(8*$i%64,"esp"),$A)',		# modulo-scheduled save a
	'&paddq	("mm7","mm5")',				# X[i]+=Ch(e,f,g)
	'&pxor	("mm3",$E)',
	'&psrlq	("mm1",23)',
	'&paddq	("mm7",&QWP(8*($i+7)%64,"esp"))',	# X[i]+=h
	'&pxor	("mm3","mm1")',
	'&psllq	($E,4)',
	'&pxor	("mm3",$E)',				# T1=Sigma1_512(e)

	'&movq	($E,&QWP(8*($i+3)%64,"esp"))',		# e = load d, e in next round
	'&paddq	("mm3","mm7")',				# T1+=X[i]
	'&movq	("mm5",$A)',				# %mm5 is sliding right
	'&psrlq	("mm5",28)',
	'&paddq	($E,"mm3")',				# d += T1
	'&movq	("mm6",$A)',				# %mm6 is sliding left
	'&movq	("mm7","mm5")',
	'&psllq	("mm6",25)',
	'&movq	("mm1",&QWP(8*($i+1)%64,"esp"))',	# load b
	'&psrlq	("mm5",6)',
	'&pxor	("mm7","mm6")',
	'&psllq	("mm6",5)',
	'&pxor	("mm7","mm5")',
	'&pxor	($A,"mm1")',				# a^b, b^c in next round
	'&psrlq	("mm5",5)',
	'&pxor	("mm7","mm6")',
	'&pand	($BxC,$A)',				# (b^c)&(a^b)
	'&psllq	("mm6",6)',
	'&pxor	("mm7","mm5")',
	'&pxor	($BxC,"mm1")',				# [h=]Maj(a,b,c)
	'&pxor	("mm6","mm7")',				# Sigma0_512(a)
	'&movq	("mm5",&QWP(8*($i+5-1)%64,"esp"))',	# pre-load f
	'&paddq	($BxC,"mm6")',				# h+=Sigma0(a)
	'&movq	("mm6",&QWP(8*($i+6-1)%64,"esp"))',	# pre-load g

	'($A,$BxC) = ($BxC,$A); $i--;'
	);
}
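
# The round body above is returned as a list of instruction *strings*
# rather than emitted directly, so that the schedule code below can
# software-pipeline it: it grabs two rounds' worth of strings and
# eval()s one of them after each SIMD instruction. Sketch of the
# pattern actually used in the 00_47_ssse3 loop:
#
#	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
#	&movdqa	(...);			# one schedule instruction
#	eval(shift(@insns));		# one interleaved round instruction
#	...
#	foreach (@insns) { eval; }	# drain whatever is left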

&set_label("00_47_ssse3",32);

    for(;$j<16;$j++) {
	my ($t0,$t2,$t1)=@X[2..4];
	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());

	&movdqa	($t2,@X[5]);
	&movdqa	(@X[1],$t0);			# restore @X[1]
	&palignr ($t0,@X[0],8);			# X[1..2]
	&movdqa	(&QWP(16*($j%4),$frame),@X[4]);	# off-load @X[4]
	&palignr ($t2,@X[4],8);			# X[9..10]

	&movdqa	($t1,$t0);
	&psrlq	($t0,7);
	&paddq	(@X[0],$t2);			# X[0..1] += X[9..10]
	&movdqa	($t2,$t1);
	&psrlq	($t1,1);
	&psllq	($t2,64-8);
	&pxor	($t0,$t1);
	&psrlq	($t1,8-1);
	&pxor	($t0,$t2);
	&psllq	($t2,8-1);
	&pxor	($t0,$t1);
	&movdqa	($t1,@X[7]);
	&pxor	($t0,$t2);			# sigma0(X[1..2])
	&movdqa	($t2,@X[7]);
	&psrlq	($t1,6);
	&paddq	(@X[0],$t0);			# X[0..1] += sigma0(X[1..2])

	&movdqa	($t0,@X[7]);
	&psrlq	($t2,19);
	&psllq	($t0,64-61);
	&pxor	($t1,$t2);
	&psrlq	($t2,61-19);
	&pxor	($t1,$t0);
	&psllq	($t0,61-19);
	&pxor	($t1,$t2);
	&movdqa	($t2,&QWP(16*(($j+2)%4),$frame));	# pre-restore @X[1]
	&pxor	($t1,$t0);			# sigma1(X[14..15])
	&movdqa	($t0,&QWP(16*($j%8),$K512));
	 eval(shift(@insns));
	&paddq	(@X[0],$t1);			# X[0..1] += sigma1(X[14..15])
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddq	($t0,@X[0]);
	foreach(@insns) { eval; }
	&movdqa	(&QWP(16*($j%8)-128,$frame),$t0);	# xfer X[i]+K[i]

	push(@X,shift(@X));			# rotate(@X)
    }
	&lea	($K512,&DWP(16*8,$K512));
	&dec	("ecx");
	&jnz	(&label("00_47_ssse3"));

	&movdqa	(@X[1],&QWP(0,$K512));		# byte swap mask
	&lea	($K512,&DWP(-80*8,$K512));	# rewind
	&movdqu	(@X[0],&QWP(0,"ebx"));
	&pshufb	(@X[0],@X[1]);

    for ($j=0;$j<8;$j++) {		# load next or same block
	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());

	&movdqa	(&QWP(16*(($j-1)%4),$frame),@X[3]) if ($j>4);	# off-load
	&movdqa	(@X[3],&QWP(16*($j%8),$K512));
	&movdqa	(@X[2],@X[1]) if ($j<7);	# perpetuate byte swap mask
	&movdqu	(@X[1],&QWP(16*($j+1),"ebx")) if ($j<7);	# next input
	&movdqa	(@X[1],&QWP(16*(($j+1)%4),$frame)) if ($j==7);	# restore @X[0]
	&paddq	(@X[3],@X[0]);
	&pshufb	(@X[1],@X[2]) if ($j<7);
	foreach(@insns) { eval; }
	&movdqa	(&QWP(16*($j%8)-128,$frame),@X[3]);	# xfer X[i]+K[i]

	push(@X,shift(@X));			# rotate(@X)
    }

	#&movq	($A,$Asse2);		# load A-H
	&movq	("mm1",$Bsse2);
	&paddq	($A,"mm3");		# from BODY_00_15
	#&movq	($BxC,$Csse2);
	&movq	("mm3",$Dsse2);
	#&movq	($E,$Esse2);
	#&movq	("mm5",$Fsse2);
	#&movq	("mm6",$Gsse2);
	&movq	("mm7",$Hsse2);

	&pxor	($BxC,"mm1");		# de-magic
	&paddq	($A,&QWP(0,"esi"));
	&paddq	("mm1",&QWP(8,"esi"));
	&paddq	($BxC,&QWP(16,"esi"));
	&paddq	("mm3",&QWP(24,"esi"));
	&paddq	($E,&QWP(32,"esi"));
	&paddq	("mm5",&QWP(40,"esi"));
	&paddq	("mm6",&QWP(48,"esi"));
	&paddq	("mm7",&QWP(56,"esi"));

	&movq	(&QWP(0,"esi"),$A);
	&movq	(&QWP(8,"esi"),"mm1");
	&movq	(&QWP(16,"esi"),$BxC);
	&movq	(&QWP(24,"esi"),"mm3");
	&movq	(&QWP(32,"esi"),$E);
	&movq	(&QWP(40,"esi"),"mm5");
	&movq	(&QWP(48,"esi"),"mm6");
	&movq	(&QWP(56,"esi"),"mm7");

	&cmp	("edi","eax");		# are we done yet?
	&jb	(&label("loop_ssse3"));

	&mov	("esp",&DWP(64+12,$frame));	# restore sp
	&emms	();
}
&function_end_A();
}
&set_label("loop_x86",16);
    # copy input block to stack reversing byte and qword order
    for ($i=0;$i<8;$i++) {
	&mov	("eax",&DWP($i*16+0,"edi"));
	&mov	("ebx",&DWP($i*16+4,"edi"));
	&mov	("ecx",&DWP($i*16+8,"edi"));
	&mov	("edx",&DWP($i*16+12,"edi"));
	&bswap	("eax");
	&bswap	("ebx");
	&bswap	("ecx");
	&bswap	("edx");
	&push	("eax");
	&push	("ebx");
	&push	("ecx");
	&push	("edx");
    }
	&add	("edi",128);
	&sub	("esp",9*8);		# place for T,A,B,C,D,E,F,G,H
	&mov	(&DWP(8*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&lea	("edi",&DWP(8,"esp"));
	&mov	("ecx",16);
	&data_word(0xA5F3F689);		# rep movsd
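	# (The dword above hand-encodes the instruction bytes, little-endian:
	# 0x89,0xF6 = mov %esi,%esi filler, then 0xF3,0xA5 = rep movsd -
	# presumably because the perlasm layer has no mnemonic for it.)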

&set_label("00_15_x86",16);
	&BODY_00_15_x86();

	&cmp	(&LB("edx"),0x94);	# LSB of K512[15], i.e. 16 rounds done
	&jne	(&label("00_15_x86"));

&set_label("16_79_x86",16);
	#define sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
	#	LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
	#	HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
	&mov	("ecx",&DWP(8*(9+15+16-1)+0,"esp"));
	&mov	("edx",&DWP(8*(9+15+16-1)+4,"esp"));
	&mov	("esi","ecx");

	&shr	("ecx",1);		# lo>>1
	&mov	("edi","edx");
	&shr	("edx",1);		# hi>>1
	&mov	("eax","ecx");
	&shl	("esi",24);		# lo<<24
	&mov	("ebx","edx");
	&shl	("edi",24);		# hi<<24
	&xor	("ebx","esi");

	&shr	("ecx",7-1);		# lo>>7
	&xor	("eax","edi");
	&shr	("edx",7-1);		# hi>>7
	&xor	("eax","ecx");
	&shl	("esi",31-24);		# lo<<31
	&xor	("ebx","edx");
	&shl	("edi",25-24);		# hi<<25
	&xor	("ebx","esi");

	&shr	("ecx",8-7);		# lo>>8
	&xor	("eax","edi");
	&shr	("edx",8-7);		# hi>>8
	&xor	("eax","ecx");
	&shl	("edi",31-25);		# hi<<31
	&xor	("ebx","edx");
	&xor	("eax","edi");		# T1 = sigma0(X[-15])

	&mov	(&DWP(0,"esp"),"eax");
	&mov	(&DWP(4,"esp"),"ebx");	# put T1 away

	#define sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
	#	LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
	#	HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
	&mov	("ecx",&DWP(8*(9+15+16-14)+0,"esp"));
	&mov	("edx",&DWP(8*(9+15+16-14)+4,"esp"));
	&mov	("esi","ecx");

	&shr	("ecx",6);		# lo>>6
	&mov	("edi","edx");
	&shr	("edx",6);		# hi>>6
	&mov	("eax","ecx");
	&shl	("esi",3);		# lo<<3
	&mov	("ebx","edx");
	&shl	("edi",3);		# hi<<3
	&xor	("eax","esi");

	&shr	("ecx",19-6);		# lo>>19
	&xor	("ebx","edi");
	&shr	("edx",19-6);		# hi>>19
	&xor	("eax","ecx");
	&shl	("esi",13-3);		# lo<<13
	&xor	("ebx","edx");
	&shl	("edi",13-3);		# hi<<13
	&xor	("ebx","esi");

	&shr	("ecx",29-19);		# lo>>29
	&xor	("eax","edi");
	&shr	("edx",29-19);		# hi>>29
	&xor	("ebx","ecx");
	&shl	("edi",26-13);		# hi<<26
	&xor	("eax","edx");
	&xor	("eax","edi");		# sigma1(X[-2])

	&mov	("ecx",&DWP(8*(9+15+16)+0,"esp"));
	&mov	("edx",&DWP(8*(9+15+16)+4,"esp"));
	&add	("eax",&DWP(0,"esp"));
	&adc	("ebx",&DWP(4,"esp"));	# T1 = sigma1(X[-2])+T1
	&mov	("esi",&DWP(8*(9+15+16-9)+0,"esp"));
	&mov	("edi",&DWP(8*(9+15+16-9)+4,"esp"));
	&add	("eax","ecx");
	&adc	("ebx","edx");		# T1 += X[-16]
	&add	("eax","esi");
	&adc	("ebx","edi");		# T1 += X[-7]
	&mov	(&DWP(8*(9+15)+0,"esp"),"eax");
	&mov	(&DWP(8*(9+15)+4,"esp"),"ebx");	# save X[0]
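	# i.e. X[0] = sigma1(X[-2]) + X[-7] + sigma0(X[-15]) + X[-16],
	# the standard SHA-512 message schedule, computed in 32-bit halves.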

	&BODY_00_15_x86();

	&cmp	(&LB("edx"),0x17);	# LSB of K512[79], i.e. 80 rounds done
	&jne	(&label("16_79_x86"));

	&mov	("esi",&DWP(8*(9+16+80)+0,"esp"));	# ctx
	&mov	("edi",&DWP(8*(9+16+80)+4,"esp"));	# inp
    for($i=0;$i<4;$i++) {
	&mov	("eax",&DWP($i*16+0,"esi"));
	&mov	("ebx",&DWP($i*16+4,"esi"));
	&mov	("ecx",&DWP($i*16+8,"esi"));
	&mov	("edx",&DWP($i*16+12,"esi"));
	&add	("eax",&DWP(8+($i*16)+0,"esp"));
	&adc	("ebx",&DWP(8+($i*16)+4,"esp"));
	&mov	(&DWP($i*16+0,"esi"),"eax");
	&mov	(&DWP($i*16+4,"esi"),"ebx");
	&add	("ecx",&DWP(8+($i*16)+8,"esp"));
	&adc	("edx",&DWP(8+($i*16)+12,"esp"));
	&mov	(&DWP($i*16+8,"esi"),"ecx");
	&mov	(&DWP($i*16+12,"esi"),"edx");
    }
	&add	("esp",8*(9+16+80));	# destroy frame
	&sub	($K512,8*80);		# rewind K

	&cmp	("edi",&DWP(8,"esp"));	# are we done yet?
	&jb	(&label("loop_x86"));

	&mov	("esp",&DWP(12,"esp"));	# restore sp
&function_end_A();

&set_label("K512",64);	# Yes! I keep it in the code segment!
	&data_word(0xd728ae22,0x428a2f98);	# u64
	&data_word(0x23ef65cd,0x71374491);	# u64
	&data_word(0xec4d3b2f,0xb5c0fbcf);	# u64
	&data_word(0x8189dbbc,0xe9b5dba5);	# u64
	&data_word(0xf348b538,0x3956c25b);	# u64
	&data_word(0xb605d019,0x59f111f1);	# u64
	&data_word(0xaf194f9b,0x923f82a4);	# u64
	&data_word(0xda6d8118,0xab1c5ed5);	# u64
	&data_word(0xa3030242,0xd807aa98);	# u64
	&data_word(0x45706fbe,0x12835b01);	# u64
	&data_word(0x4ee4b28c,0x243185be);	# u64
	&data_word(0xd5ffb4e2,0x550c7dc3);	# u64
	&data_word(0xf27b896f,0x72be5d74);	# u64
	&data_word(0x3b1696b1,0x80deb1fe);	# u64
	&data_word(0x25c71235,0x9bdc06a7);	# u64
	&data_word(0xcf692694,0xc19bf174);	# u64
	&data_word(0x9ef14ad2,0xe49b69c1);	# u64
	&data_word(0x384f25e3,0xefbe4786);	# u64
	&data_word(0x8b8cd5b5,0x0fc19dc6);	# u64
	&data_word(0x77ac9c65,0x240ca1cc);	# u64
	&data_word(0x592b0275,0x2de92c6f);	# u64
	&data_word(0x6ea6e483,0x4a7484aa);	# u64
	&data_word(0xbd41fbd4,0x5cb0a9dc);	# u64
	&data_word(0x831153b5,0x76f988da);	# u64
	&data_word(0xee66dfab,0x983e5152);	# u64
	&data_word(0x2db43210,0xa831c66d);	# u64
	&data_word(0x98fb213f,0xb00327c8);	# u64
	&data_word(0xbeef0ee4,0xbf597fc7);	# u64
	&data_word(0x3da88fc2,0xc6e00bf3);	# u64
	&data_word(0x930aa725,0xd5a79147);	# u64
	&data_word(0xe003826f,0x06ca6351);	# u64
	&data_word(0x0a0e6e70,0x14292967);	# u64
	&data_word(0x46d22ffc,0x27b70a85);	# u64
	&data_word(0x5c26c926,0x2e1b2138);	# u64
	&data_word(0x5ac42aed,0x4d2c6dfc);	# u64
	&data_word(0x9d95b3df,0x53380d13);	# u64
	&data_word(0x8baf63de,0x650a7354);	# u64
	&data_word(0x3c77b2a8,0x766a0abb);	# u64
	&data_word(0x47edaee6,0x81c2c92e);	# u64
	&data_word(0x1482353b,0x92722c85);	# u64
	&data_word(0x4cf10364,0xa2bfe8a1);	# u64
	&data_word(0xbc423001,0xa81a664b);	# u64
	&data_word(0xd0f89791,0xc24b8b70);	# u64
	&data_word(0x0654be30,0xc76c51a3);	# u64
	&data_word(0xd6ef5218,0xd192e819);	# u64
	&data_word(0x5565a910,0xd6990624);	# u64
	&data_word(0x5771202a,0xf40e3585);	# u64
	&data_word(0x32bbd1b8,0x106aa070);	# u64
	&data_word(0xb8d2d0c8,0x19a4c116);	# u64
	&data_word(0x5141ab53,0x1e376c08);	# u64
	&data_word(0xdf8eeb99,0x2748774c);	# u64
	&data_word(0xe19b48a8,0x34b0bcb5);	# u64
	&data_word(0xc5c95a63,0x391c0cb3);	# u64
	&data_word(0xe3418acb,0x4ed8aa4a);	# u64
	&data_word(0x7763e373,0x5b9cca4f);	# u64
	&data_word(0xd6b2b8a3,0x682e6ff3);	# u64
	&data_word(0x5defb2fc,0x748f82ee);	# u64
	&data_word(0x43172f60,0x78a5636f);	# u64
	&data_word(0xa1f0ab72,0x84c87814);	# u64
	&data_word(0x1a6439ec,0x8cc70208);	# u64
	&data_word(0x23631e28,0x90befffa);	# u64
	&data_word(0xde82bde9,0xa4506ceb);	# u64
	&data_word(0xb2c67915,0xbef9a3f7);	# u64
	&data_word(0xe372532b,0xc67178f2);	# u64
	&data_word(0xea26619c,0xca273ece);	# u64
	&data_word(0x21c0c207,0xd186b8c7);	# u64
	&data_word(0xcde0eb1e,0xeada7dd6);	# u64
	&data_word(0xee6ed178,0xf57d4f7f);	# u64
	&data_word(0x72176fba,0x06f067aa);	# u64
	&data_word(0xa2c898a6,0x0a637dc5);	# u64
	&data_word(0xbef90dae,0x113f9804);	# u64
	&data_word(0x131c471b,0x1b710b35);	# u64
	&data_word(0x23047d84,0x28db77f5);	# u64
	&data_word(0x40c72493,0x32caab7b);	# u64
	&data_word(0x15c9bebc,0x3c9ebe0a);	# u64
	&data_word(0x9c100d4c,0x431d67c4);	# u64
	&data_word(0xcb3e42b6,0x4cc5d4be);	# u64
	&data_word(0xfc657e2a,0x597f299c);	# u64
	&data_word(0x3ad6faec,0x5fcb6fab);	# u64
	&data_word(0x4a475817,0x6c44198c);	# u64

	&data_word(0x04050607,0x00010203);	# byte swap
	&data_word(0x0c0d0e0f,0x08090a0b);	# mask
&function_end_B("sha512_block_data_order");
&asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();

close STDOUT;