#! /usr/bin/env perl
# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# March, June 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+128 bytes shared
# table]. The GHASH function features a so-called "528B" variant
# utilizing an additional 256+16 bytes of per-key storage [+512 bytes
# shared table]. Performance results are for this streamed GHASH
# subroutine and are expressed in cycles per processed byte, less is
# better:
#
#		gcc 3.4.x(*)	assembler
#
# P4		28.6		14.0		+100%
# Opteron	19.3		7.7		+150%
# Core2		17.8		8.1(**)		+120%
# Atom		31.6		16.8		+88%
# VIA Nano	21.8		10.1		+115%
#
# (*)	comparison is not completely fair, because C results are
#	for the vanilla "256B" implementation, while assembler results
#	are for "528B";-)
# (**)	it's a mystery [to me] why the Core2 result is not the same
#	as for Opteron;
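#
# A note for readers (summarizing the above): the operation implemented
# throughout this file is the GHASH recurrence from the GCM spec,
# Xi = (Xi-1 xor Ii)*H, a multiplication in GF(2^128) with the
# bit-reflected polynomial x^128+x^7+x^2+x+1. The "4-bit" method walks
# the multiplier one nibble at a time against a 16-entry table of H*j
# [the 256-byte per-key table], with .Lrem_4bit [the shared table]
# folding the bits shifted off the low end back in.
#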
# May 2010
#
# Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
# See ghash-x86.pl for background information and details about coding
# techniques.
#
# Special thanks to David Woodhouse for providing access to a
# Westmere-based system on behalf of Intel Open Source Technology Centre.

# December 2012
#
# Overhaul: aggregate Karatsuba post-processing, improve ILP in
# reduction_alg9, increase reduction aggregate factor to 4x. As for
# the latter, ghash-x86.pl discusses why it makes less sense to
# increase the aggregate factor there. Then why increase it here? The
# critical path consists of 3 independent pclmulqdq instructions,
# Karatsuba post-processing and reduction. "On top" of this we lay
# down aggregated multiplication operations, triplets of independent
# pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
# less sense to aggregate more multiplications than it takes to
# perform the remaining non-multiplication operations. 2x is a
# near-optimal coefficient for contemporary Intel CPUs (hence the
# modest improvement coefficient), but not for Bulldozer. The latter
# is because logical SIMD operations there are twice as slow as on
# Intel, so the critical path is longer. A CPU with a higher pclmulqdq
# issue rate would also benefit from a higher aggregate factor...
#
# Westmere	1.78(+13%)
# Sandy Bridge	1.80(+8%)
# Ivy Bridge	1.80(+7%)
# Haswell	0.55(+93%) (if system doesn't support AVX)
# Broadwell	0.45(+110%)(if system doesn't support AVX)
# Skylake	0.44(+110%)(if system doesn't support AVX)
# Bulldozer	1.49(+27%)
# Silvermont	2.88(+13%)
# Knights L	2.12(-)    (if system doesn't support AVX)
# Goldmont	1.08(+24%)

# March 2013
#
# ... the 8x aggregate factor AVX code path uses the reduction
# algorithm suggested by Shay Gueron[1]. Even though contemporary
# AVX-capable CPUs such as Sandy and Ivy Bridge can execute it, the
# code performs sub-optimally in comparison to the above-mentioned
# version. But thanks to Ilya Albrekht and Max Locktyukhin of Intel
# Corp. we know that it performs at 0.41 cycles per byte on a Haswell
# processor, 0.29 on Broadwell, and 0.36 on Skylake.
#
# Knights Landing achieves 1.09 cpb.
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest

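# For illustration only: a minimal bit-by-bit reference model of the
# GF(2^128) multiplication computed by every routine below, written
# against the NIST GCM specification. It is a sketch (assuming only
# core Math::BigInt), is not part of the generator, and is kept behind
# if (0) so it never runs.
if (0) {
	require Math::BigInt;
	sub gf128_mul_ref {
		my ($X,$H) = @_;	# Math::BigInt, bit-reflected representation
		my $R = Math::BigInt->from_hex("0xe1000000000000000000000000000000");
		my $Z = Math::BigInt->bzero();
		my $V = $X->copy();
		for my $i (0..127) {
			# accumulate V whenever bit i of H (MSB first) is set
			$Z->bxor($V)
			    if $H->copy()->brsft(127-$i)->band(Math::BigInt->bone())->is_one();
			# V >>= 1, folding the dropped bit back in with 0xE1...,
			# the bit-reflected image of x^128+x^7+x^2+x+1
			my $carry = $V->copy()->band(Math::BigInt->bone())->is_one();
			$V->brsft(1);
			$V->bxor($R) if ($carry);
		}
		return $Z;
	}
}
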
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# See the notes about |$avx| in aesni-gcm-x86_64.pl; otherwise tags will be
# computed incorrectly.
#
# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
$avx = 1;

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

$do4xaggr=1;

# common register layout
$nlo="%rax";
$nhi="%rbx";
$Zlo="%r8";
$Zhi="%r9";
$tmp="%r10";
$rem_4bit = "%r11";

$Xi="%rdi";
$Htbl="%rsi";

# per-function register layout
$cnt="%rcx";
$rem="%rdx";

sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/	or
			$r =~ s/%[er]([sd]i)/%\1l/	or
			$r =~ s/%[er](bp)/%\1l/		or
			$r =~ s/%(r[0-9]+)[d]?/%\1b/;	$r; }

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}

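# The loop() generator below emits the nibble-at-a-time walk of the
# "4-bit" method: each step shifts the 128-bit accumulator Zhi:Zlo
# right by 4, XORs in the 16-byte table entry selected by the next
# nibble, and repairs the 4 bits shifted off the low end through the
# .Lrem_4bit table (indexed by $rem). Two such steps are performed per
# byte of $Xi, walking from byte 15 down to byte 0.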
{ my $N;
  sub loop() {
  my $inp = shift;

	$N++;
$code.=<<___;
	xor	$nlo,$nlo
	xor	$nhi,$nhi
	mov	`&LB("$Zlo")`,`&LB("$nlo")`
	mov	`&LB("$Zlo")`,`&LB("$nhi")`
	shl	\$4,`&LB("$nlo")`
	mov	\$14,$cnt
	mov	8($Htbl,$nlo),$Zlo
	mov	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	mov	$Zlo,$rem
	jmp	.Loop$N

.align	16
.Loop$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	mov	($inp,$cnt),`&LB("$nlo")`
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	mov	`&LB("$nlo")`,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	shl	\$4,`&LB("$nlo")`
	xor	$tmp,$Zlo
	dec	$cnt
	js	.Lbreak$N

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo
	jmp	.Loop$N

.align	16
.Lbreak$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	xor	$tmp,$Zlo
	xor	($rem_4bit,$rem,8),$Zhi

	bswap	$Zlo
	bswap	$Zhi
___
}}

$code=<<___;
.text
.extern	OPENSSL_ia32cap_P

.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,\@function,2
.align	16
gcm_gmult_4bit:
.cfi_startproc
	push	%rbx
.cfi_push	%rbx
	push	%rbp		# %rbp and others are pushed exclusively in
.cfi_push	%rbp
	push	%r12		# order to reuse Win64 exception handler...
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
	sub	\$280,%rsp
.cfi_adjust_cfa_offset	280
.Lgmult_prologue:

	movzb	15($Xi),$Zlo
	lea	.Lrem_4bit(%rip),$rem_4bit
___
	&loop	($Xi);
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280+48(%rsp),%rsi
.cfi_def_cfa	%rsi,8
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lgmult_epilogue:
	ret
.cfi_endproc
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___

# per-function register layout
$inp="%rdx";
$len="%rcx";
$rem_8bit=$rem_4bit;

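# gcm_ghash_4bit below is the streamed "528B" variant advertised at the
# top of the file: it spills a 4-bit-shifted copy of the per-key table
# to the stack ($Hshr4) and trades .Lrem_4bit for the larger
# .Lrem_8bit table, so remainder fixups can be done a byte at a time.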
$code.=<<___;
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,\@function,4
.align	16
gcm_ghash_4bit:
.cfi_startproc
	push	%rbx
.cfi_push	%rbx
	push	%rbp
.cfi_push	%rbp
	push	%r12
.cfi_push	%r12
	push	%r13
.cfi_push	%r13
	push	%r14
.cfi_push	%r14
	push	%r15
.cfi_push	%r15
	sub	\$280,%rsp
.cfi_adjust_cfa_offset	280
.Lghash_prologue:
	mov	$inp,%r14		# reassign couple of args
	mov	$len,%r15
___
{ my $inp="%r14";
  my $dat="%edx";
  my $len="%r15";
  my @nhi=("%ebx","%ecx");
  my @rem=("%r12","%r13");
  my $Hshr4="%rbp";

	&sub	($Htbl,-128);		# size optimization
	&lea	($Hshr4,"16+128(%rsp)");
	{ my @lo =($nlo,$nhi);
	  my @hi =($Zlo,$Zhi);

	  &xor	($dat,$dat);
	  for ($i=0,$j=-2;$i<18;$i++,$j++) {
	    &mov	("$j(%rsp)",&LB($dat))		if ($i>1);
	    &or		($lo[0],$tmp)			if ($i>1);
	    &mov	(&LB($dat),&LB($lo[1]))		if ($i>0 && $i<17);
	    &shr	($lo[1],4)			if ($i>0 && $i<17);
	    &mov	($tmp,$hi[1])			if ($i>0 && $i<17);
	    &shr	($hi[1],4)			if ($i>0 && $i<17);
	    &mov	("8*$j($Hshr4)",$hi[0])		if ($i>1);
	    &mov	($hi[0],"16*$i+0-128($Htbl)")	if ($i<16);
	    &shl	(&LB($dat),4)			if ($i>0 && $i<17);
	    &mov	("8*$j-128($Hshr4)",$lo[0])	if ($i>1);
	    &mov	($lo[0],"16*$i+8-128($Htbl)")	if ($i<16);
	    &shl	($tmp,60)			if ($i>0 && $i<17);

	    push	(@lo,shift(@lo));
	    push	(@hi,shift(@hi));
	  }
	}
	&add	($Htbl,-128);
	&mov	($Zlo,"8($Xi)");
	&mov	($Zhi,"0($Xi)");
	&add	($len,$inp);		# pointer to the end of data
	&lea	($rem_8bit,".Lrem_8bit(%rip)");
	&jmp	(".Louter_loop");

$code.=".align	16\n.Louter_loop:\n";
	&xor	($Zhi,"($inp)");
	&mov	("%rdx","8($inp)");
	&lea	($inp,"16($inp)");
	&xor	("%rdx",$Zlo);
	&mov	("($Xi)",$Zhi);
	&mov	("8($Xi)","%rdx");
	&shr	("%rdx",32);

	&xor	($nlo,$nlo);
	&rol	($dat,8);
	&mov	(&LB($nlo),&LB($dat));
	&movz	($nhi[0],&LB($dat));
	&shl	(&LB($nlo),4);
	&shr	($nhi[0],4);

	for ($j=11,$i=0;$i<15;$i++) {
	    &rol	($dat,8);
	    &xor	($Zlo,"8($Htbl,$nlo)")		if ($i>0);
	    &xor	($Zhi,"($Htbl,$nlo)")		if ($i>0);
	    &mov	($Zlo,"8($Htbl,$nlo)")		if ($i==0);
	    &mov	($Zhi,"($Htbl,$nlo)")		if ($i==0);

	    &mov	(&LB($nlo),&LB($dat));
	    &xor	($Zlo,$tmp)			if ($i>0);
	    &movzw	($rem[1],"($rem_8bit,$rem[1],2)")	if ($i>0);

	    &movz	($nhi[1],&LB($dat));
	    &shl	(&LB($nlo),4);
	    &movzb	($rem[0],"(%rsp,$nhi[0])");

	    &shr	($nhi[1],4)			if ($i<14);
	    &and	($nhi[1],0xf0)			if ($i==14);
	    &shl	($rem[1],48)			if ($i>0);
	    &xor	($rem[0],$Zlo);

	    &mov	($tmp,$Zhi);
	    &xor	($Zhi,$rem[1])			if ($i>0);
	    &shr	($Zlo,8);

	    &movz	($rem[0],&LB($rem[0]));
	    &mov	($dat,"$j($Xi)")		if (--$j%4==0);
	    &shr	($Zhi,8);

	    &xor	($Zlo,"-128($Hshr4,$nhi[0],8)");
	    &shl	($tmp,56);
	    &xor	($Zhi,"($Hshr4,$nhi[0],8)");

	    unshift	(@nhi,pop(@nhi));		# "rotate" registers
	    unshift	(@rem,pop(@rem));
	}
	&movzw	($rem[1],"($rem_8bit,$rem[1],2)");
	&xor	($Zlo,"8($Htbl,$nlo)");
	&xor	($Zhi,"($Htbl,$nlo)");

	&shl	($rem[1],48);
	&xor	($Zlo,$tmp);

	&xor	($Zhi,$rem[1]);
	&movz	($rem[0],&LB($Zlo));
	&shr	($Zlo,4);

	&mov	($tmp,$Zhi);
	&shl	(&LB($rem[0]),4);
	&shr	($Zhi,4);

	&xor	($Zlo,"8($Htbl,$nhi[0])");
	&movzw	($rem[0],"($rem_8bit,$rem[0],2)");
	&shl	($tmp,60);

	&xor	($Zhi,"($Htbl,$nhi[0])");
	&xor	($Zlo,$tmp);
	&shl	($rem[0],48);

	&bswap	($Zlo);
	&xor	($Zhi,$rem[0]);

	&bswap	($Zhi);
	&cmp	($inp,$len);
	&jb	(".Louter_loop");
}
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280+48(%rsp),%rsi
.cfi_def_cfa	%rsi,8
	mov	-48(%rsi),%r15
.cfi_restore	%r15
	mov	-40(%rsi),%r14
.cfi_restore	%r14
	mov	-32(%rsi),%r13
.cfi_restore	%r13
	mov	-24(%rsi),%r12
.cfi_restore	%r12
	mov	-16(%rsi),%rbp
.cfi_restore	%rbp
	mov	-8(%rsi),%rbx
.cfi_restore	%rbx
	lea	0(%rsi),%rsp
.cfi_def_cfa_register	%rsp
.Lghash_epilogue:
	ret
.cfi_endproc
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
___

######################################################################
# PCLMULQDQ version.

@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

($Xi,$Xhi)=("%xmm0","%xmm1");	$Hkey="%xmm2";
($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");

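# clmul64x64_T2 computes the 256-bit product Xhi:Xi = Xi*Hkey with the
# usual 3-multiplication Karatsuba schedule: lo*lo, hi*hi and
# (lo^hi)*(lo^hi), with the middle term recovered as T1 ^ lo ^ hi and
# split across the two result halves.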
sub clmul64x64_T2 {	# minimal register pressure
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pshufd		\$0b01001110,$Hkey,$T2
	pxor		$Xi,$T1			#
	pxor		$Hkey,$T2
___
} else {
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1			#
___
}
$code.=<<___;
	pclmulqdq	\$0x00,$Hkey,$Xi	#######
	pclmulqdq	\$0x11,$Hkey,$Xhi	#######
	pclmulqdq	\$0x00,$HK,$T1		#######
	pxor		$Xi,$T1			#
	pxor		$Xhi,$T1		#

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
}

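# reduction_alg9 folds the 256-bit carry-less product Xhi:Xi back into
# 128 bits modulo the GHASH polynomial (x^128+x^7+x^2+x+1 in
# bit-reflected form). The three left shifts of the 1st phase compose
# to Xi*(x^63 + x^62 + x^57); the 2nd phase mirrors them with right
# shifts by 1, 5 and 1 to complete the fold into Xhi.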
sub reduction_alg9 {	# 17/11 times faster than Intel version
my ($Xhi,$Xi) = @_;

$code.=<<___;
	# 1st phase
	movdqa		$Xi,$T2			#
	movdqa		$Xi,$T1
	psllq		\$5,$Xi
	pxor		$Xi,$T1			#
	psllq		\$1,$Xi
	pxor		$T1,$Xi			#
	psllq		\$57,$Xi		#
	movdqa		$Xi,$T1			#
	pslldq		\$8,$Xi
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#

	# 2nd phase
	movdqa		$Xi,$T2
	psrlq		\$1,$Xi
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi
	pxor		$T2,$Xi			#
	psrlq		\$1,$Xi			#
	pxor		$Xhi,$Xi		#
___
}

{ my ($Htbl,$Xip)=@_4args;
  my $HK="%xmm6";

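# gcm_init_clmul fills the per-key table $Htbl for the CLMUL paths:
# H at 0x00, H^2 at 0x10 and their Karatsuba "salt" (lo^hi halves) at
# 0x20; with $do4xaggr, H^3/H^4 plus their salt follow at 0x30-0x50.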
$code.=<<___;
.globl	gcm_init_clmul
.type	gcm_init_clmul,\@abi-omnipotent
.align	16
gcm_init_clmul:
.cfi_startproc
.L_init_clmul:
___
$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	movdqu		($Xip),$Hkey
	pshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	pshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	movdqa		$Hkey,$T1
	psllq		\$1,$Hkey
	pxor		$T3,$T3			#
	psrlq		\$63,$T1
	pcmpgtd		$T2,$T3			# broadcast carry bit
	pslldq		\$8,$T1
	por		$T1,$Hkey		# H<<=1

	# magic reduction
	pand		.L0x1c2_polynomial(%rip),$T3
	pxor		$T3,$Hkey		# if(carry) H^=0x1c2_polynomial

	# calculate H^2
	pshufd		\$0b01001110,$Hkey,$HK
	movdqa		$Hkey,$Xi
	pxor		$Hkey,$HK
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$Hkey,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$Hkey,$T1		# Karatsuba pre-processing
	movdqu		$Hkey,0x00($Htbl)	# save H
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x10($Htbl)		# save H^2
	palignr		\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu		$T2,0x20($Htbl)		# save Karatsuba "salt"
___
if ($do4xaggr) {
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^3
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	movdqa		$Xi,$T3
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^4
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$T3,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$T3,$T1			# Karatsuba pre-processing
	movdqu		$T3,0x30($Htbl)		# save H^3
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x40($Htbl)		# save H^4
	palignr		\$8,$T1,$T2		# low part is H^3.lo^H^3.hi...
	movdqu		$T2,0x50($Htbl)		# save Karatsuba "salt"
___
}
$code.=<<___ if ($win64);
	movaps		(%rsp),%xmm6
	lea		0x18(%rsp),%rsp
.LSEH_end_gcm_init_clmul:
___
$code.=<<___;
	ret
.cfi_endproc
.size	gcm_init_clmul,.-gcm_init_clmul
___
}

{ my ($Xip,$Htbl)=@_4args;

$code.=<<___;
.globl	gcm_gmult_clmul
.type	gcm_gmult_clmul,\@abi-omnipotent
.align	16
gcm_gmult_clmul:
.cfi_startproc
.L_gmult_clmul:
	movdqu		($Xip),$Xi
	movdqa		.Lbswap_mask(%rip),$T3
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$T2
	pshufb		$T3,$Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$T2);
$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
	# experimental alternative. the special thing about it is that
	# there is no dependency between the two multiplications...
	mov		\$`0xE1<<1`,%eax
	mov		\$0xA040608020C0E000,%r10	# ((7..0)·0xE0)&0xff
	mov		\$0x07,%r11d
	movq		%rax,$T1
	movq		%r10,$T2
	movq		%r11,$T3		# borrow $T3
	pand		$Xi,$T3
	pshufb		$T3,$T2			# ($Xi&7)·0xE0
	movq		%rax,$T3
	pclmulqdq	\$0x00,$Xi,$T1		# ·(0xE1<<1)
	pxor		$Xi,$T2
	pslldq		\$15,$T2
	paddd		$T2,$T2			# <<(64+56+1)
	pxor		$T2,$Xi
	pclmulqdq	\$0x01,$T3,$Xi
	movdqa		.Lbswap_mask(%rip),$T3	# reload $T3
	psrldq		\$1,$T1
	pxor		$T1,$Xhi
	pslldq		\$7,$Xi
	pxor		$Xhi,$Xi
___
$code.=<<___;
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
	ret
.cfi_endproc
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
___
}

{ my ($Xip,$Htbl,$inp,$len)=@_4args;
  my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
  my ($T1,$T2,$T3)=map("%xmm$_",(8..10));

$code.=<<___;
.globl	gcm_ghash_clmul
.type	gcm_ghash_clmul,\@abi-omnipotent
.align	32
gcm_ghash_clmul:
.cfi_startproc
.L_ghash_clmul:
___
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	movdqa	.Lbswap_mask(%rip),$T3

	movdqu	($Xip),$Xi
	movdqu	($Htbl),$Hkey
	movdqu	0x20($Htbl),$HK
	pshufb	$T3,$Xi

	sub	\$0x10,$len
	jz	.Lodd_tail

	movdqu	0x10($Htbl),$Hkey2
___
if ($do4xaggr) {
my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));

$code.=<<___;
	leaq	OPENSSL_ia32cap_P(%rip),%rax
	mov	4(%rax),%eax
	cmp	\$0x30,$len
	jb	.Lskip4x

	and	\$`1<<26|1<<22`,%eax	# isolate MOVBE+XSAVE
	cmp	\$`1<<22`,%eax		# check for MOVBE without XSAVE
	je	.Lskip4x

	sub	\$0x30,$len
	mov	\$0xA040608020C0E000,%rax	# ((7..0)·0xE0)&0xff
	movdqu	0x30($Htbl),$Hkey3
	movdqu	0x40($Htbl),$Hkey4

	#######
	# Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
	#
	movdqu	0x30($inp),$Xln
	movdqu	0x20($inp),$Xl
	pshufb	$T3,$Xln
	pshufb	$T3,$Xl
	movdqa	$Xln,$Xhn
	pshufd	\$0b01001110,$Xln,$Xmn
	pxor	$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	movdqa	$Xl,$Xh
	pshufd	\$0b01001110,$Xl,$Xm
	pxor	$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xl
	pclmulqdq	\$0x11,$Hkey2,$Xh
	pclmulqdq	\$0x10,$HK,$Xm
	xorps	$Xl,$Xln
	xorps	$Xh,$Xhn
	movups	0x50($Htbl),$HK
	xorps	$Xm,$Xmn

	movdqu	0x10($inp),$Xl
	movdqu	0($inp),$T1
	pshufb	$T3,$Xl
	pshufb	$T3,$T1
	movdqa	$Xl,$Xh
	pshufd	\$0b01001110,$Xl,$Xm
	pxor	$T1,$Xi
	pxor	$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	movdqa	$Xi,$Xhi
	pshufd	\$0b01001110,$Xi,$T1
	pxor	$Xi,$T1
	pclmulqdq	\$0x11,$Hkey3,$Xh
	pclmulqdq	\$0x00,$HK,$Xm
	xorps	$Xl,$Xln
	xorps	$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jc	.Ltail4x

	jmp	.Lmod4_loop
.align	32
.Lmod4_loop:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	xorps	$Xm,$Xmn
	movdqu	0x30($inp),$Xl
	pshufb	$T3,$Xl
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	xorps	$Xln,$Xi
	movdqu	0x20($inp),$Xln
	movdqa	$Xl,$Xh
	pclmulqdq	\$0x10,$HK,$T1
	pshufd	\$0b01001110,$Xl,$Xm
	xorps	$Xhn,$Xhi
	pxor	$Xl,$Xm
	pshufb	$T3,$Xln
	movups	0x20($Htbl),$HK
	xorps	$Xmn,$T1
	pclmulqdq	\$0x00,$Hkey,$Xl
	pshufd	\$0b01001110,$Xln,$Xmn

	pxor	$Xi,$T1			# aggregated Karatsuba post-processing
	movdqa	$Xln,$Xhn
	pxor	$Xhi,$T1		#
	pxor	$Xln,$Xmn
	movdqa	$T1,$T2			#
	pclmulqdq	\$0x11,$Hkey,$Xh
	pslldq	\$8,$T1
	psrldq	\$8,$T2			#
	pxor	$T1,$Xi
	movdqa	.L7_mask(%rip),$T1
	pxor	$T2,$Xhi		#
	movq	%rax,$T2

	pand	$Xi,$T1			# 1st phase
	pshufb	$T1,$T2			#
	pxor	$Xi,$T2			#
	pclmulqdq	\$0x00,$HK,$Xm
	psllq	\$57,$T2		#
	movdqa	$T2,$T1			#
	pslldq	\$8,$T2
	pclmulqdq	\$0x00,$Hkey2,$Xln
	psrldq	\$8,$T1			#
	pxor	$T2,$Xi
	pxor	$T1,$Xhi		#
	movdqu	0($inp),$T1

	movdqa	$Xi,$T2			# 2nd phase
	psrlq	\$1,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhn
	xorps	$Xl,$Xln
	movdqu	0x10($inp),$Xl
	pshufb	$T3,$Xl
	pclmulqdq	\$0x10,$HK,$Xmn
	xorps	$Xh,$Xhn
	movups	0x50($Htbl),$HK
	pshufb	$T3,$T1
	pxor	$T2,$Xhi		#
	pxor	$Xi,$T2
	psrlq	\$5,$Xi

	movdqa	$Xl,$Xh
	pxor	$Xm,$Xmn
	pshufd	\$0b01001110,$Xl,$Xm
	pxor	$T2,$Xi			#
	pxor	$T1,$Xhi
	pxor	$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	psrlq	\$1,$Xi			#
	pxor	$Xhi,$Xi		#
	movdqa	$Xi,$Xhi
	pclmulqdq	\$0x11,$Hkey3,$Xh
	xorps	$Xl,$Xln
	pshufd	\$0b01001110,$Xi,$T1
	pxor	$Xi,$T1

	pclmulqdq	\$0x00,$HK,$Xm
	xorps	$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jnc	.Lmod4_loop

.Ltail4x:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	pclmulqdq	\$0x10,$HK,$T1
	xorps	$Xm,$Xmn
	xorps	$Xln,$Xi
	xorps	$Xhn,$Xhi
	pxor	$Xi,$Xhi		# aggregated Karatsuba post-processing
	pxor	$Xmn,$T1

	pxor	$Xhi,$T1		#
	pxor	$Xi,$Xhi

	movdqa	$T1,$T2			#
	psrldq	\$8,$T1
	pslldq	\$8,$T2			#
	pxor	$T1,$Xhi
	pxor	$T2,$Xi			#
___
	&reduction_alg9($Xhi,$Xi);
$code.=<<___;
	add	\$0x40,$len
	jz	.Ldone
	movdqu	0x20($Htbl),$HK
	sub	\$0x10,$len
	jz	.Lodd_tail
.Lskip4x:
___
}
$code.=<<___;
	#######
	# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
	#	[(H*Ii+1) + (H*Xi+1)] mod P =
	#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
	movdqu	($inp),$T1		# Ii
	movdqu	16($inp),$Xln		# Ii+1
	pshufb	$T3,$T1
	pshufb	$T3,$Xln
	pxor	$T1,$Xi			# Ii+Xi

	movdqa	$Xln,$Xhn
	pshufd	\$0b01001110,$Xln,$Xmn
	pxor	$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	lea	32($inp),$inp		# i+=2
	nop
	sub	\$0x20,$len
	jbe	.Leven_tail
	nop
	jmp	.Lmod_loop

.align	32
.Lmod_loop:
	movdqa	$Xi,$Xhi
	movdqa	$Xmn,$T1
	pshufd	\$0b01001110,$Xi,$Xmn	#
	pxor	$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor	$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor	$Xhn,$Xhi
	movdqu	($inp),$T2		# Ii
	pxor	$Xi,$T1			# aggregated Karatsuba post-processing
	pshufb	$T3,$T2
	movdqu	16($inp),$Xln		# Ii+1

	pxor	$Xhi,$T1
	pxor	$T2,$Xhi		# "Ii+Xi", consume early
	pxor	$T1,$Xmn
	pshufb	$T3,$Xln
	movdqa	$Xmn,$T1		#
	psrldq	\$8,$T1
	pslldq	\$8,$Xmn		#
	pxor	$T1,$Xhi
	pxor	$Xmn,$Xi		#

	movdqa	$Xln,$Xhn		#

	movdqa	$Xi,$T2			# 1st phase
	movdqa	$Xi,$T1
	psllq	\$5,$Xi
	pxor	$Xi,$T1			#
	pclmulqdq	\$0x00,$Hkey,$Xln	#######
	psllq	\$1,$Xi
	pxor	$T1,$Xi			#
	psllq	\$57,$Xi		#
	movdqa	$Xi,$T1			#
	pslldq	\$8,$Xi
	psrldq	\$8,$T1			#
	pxor	$T2,$Xi
	pshufd	\$0b01001110,$Xhn,$Xmn
	pxor	$T1,$Xhi		#
	pxor	$Xhn,$Xmn		#

	movdqa	$Xi,$T2			# 2nd phase
	psrlq	\$1,$Xi
	pclmulqdq	\$0x11,$Hkey,$Xhn	#######
	pxor	$T2,$Xhi		#
	pxor	$Xi,$T2
	psrlq	\$5,$Xi
	pxor	$T2,$Xi			#
	lea	32($inp),$inp
	psrlq	\$1,$Xi			#
	pclmulqdq	\$0x00,$HK,$Xmn		#######
	pxor	$Xhi,$Xi		#

	sub	\$0x20,$len
	ja	.Lmod_loop

.Leven_tail:
	movdqa	$Xi,$Xhi
	movdqa	$Xmn,$T1
	pshufd	\$0b01001110,$Xi,$Xmn	#
	pxor	$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor	$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor	$Xhn,$Xhi
	pxor	$Xi,$T1
	pxor	$Xhi,$T1
	pxor	$T1,$Xmn
	movdqa	$Xmn,$T1		#
	psrldq	\$8,$T1
	pslldq	\$8,$Xmn		#
	pxor	$T1,$Xhi
	pxor	$Xmn,$Xi		#
___
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	test	$len,$len
	jnz	.Ldone

.Lodd_tail:
	movdqu	($inp),$T1		# Ii
	pshufb	$T3,$T1
	pxor	$T1,$Xi			# Ii+Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H*(Ii+Xi)
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
.Ldone:
	pshufb	$T3,$Xi
	movdqu	$Xi,($Xip)
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_clmul:
___
$code.=<<___;
	ret
.cfi_endproc
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
___
}

$code.=<<___;
.globl	gcm_init_avx
.type	gcm_init_avx,\@abi-omnipotent
.align	32
gcm_init_avx:
.cfi_startproc
___
if ($avx) {
my ($Htbl,$Xip)=@_4args;
my $HK="%xmm6";

$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	vzeroupper

	vmovdqu		($Xip),$Hkey
	vpshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	vpshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	vpsrlq		\$63,$Hkey,$T1
	vpsllq		\$1,$Hkey,$Hkey
	vpxor		$T3,$T3,$T3		#
	vpcmpgtd	$T2,$T3,$T3		# broadcast carry bit
	vpslldq		\$8,$T1,$T1
	vpor		$T1,$Hkey,$Hkey		# H<<=1

	# magic reduction
	vpand		.L0x1c2_polynomial(%rip),$T3,$T3
	vpxor		$T3,$Hkey,$Hkey		# if(carry) H^=0x1c2_polynomial

	vpunpckhqdq	$Hkey,$Hkey,$HK
	vmovdqa		$Hkey,$Xi
	vpxor		$Hkey,$HK,$HK
	mov		\$4,%r10		# up to H^8
	jmp		.Linit_start_avx
___

sub clmul64x64_avx {
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpunpckhqdq	$Hkey,$Hkey,$T2
	vpxor		$Xi,$T1,$T1		#
	vpxor		$Hkey,$T2,$T2
___
} else {
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpxor		$Xi,$T1,$T1		#
___
}
$code.=<<___;
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xhi	#######
	vpclmulqdq	\$0x00,$Hkey,$Xi,$Xi	#######
	vpclmulqdq	\$0x00,$HK,$T1,$T1	#######
	vpxor		$Xi,$Xhi,$T2		#
	vpxor		$T2,$T1,$T1		#

	vpslldq		\$8,$T1,$T2		#
	vpsrldq		\$8,$T1,$T1
	vpxor		$T2,$Xi,$Xi		#
	vpxor		$T1,$Xhi,$Xhi
___
}

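# reduction_avx is the same two-phase fold as reduction_alg9, just
# re-expressed with three explicit vpsllq shifts (57, 62, 63), taking
# advantage of the non-destructive AVX forms.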
sub reduction_avx {
my ($Xhi,$Xi) = @_;

$code.=<<___;
	vpsllq		\$57,$Xi,$T1		# 1st phase
	vpsllq		\$62,$Xi,$T2
	vpxor		$T1,$T2,$T2		#
	vpsllq		\$63,$Xi,$T1
	vpxor		$T1,$T2,$T2		#
	vpslldq		\$8,$T2,$T1		#
	vpsrldq		\$8,$T2,$T2
	vpxor		$T1,$Xi,$Xi		#
	vpxor		$T2,$Xhi,$Xhi

	vpsrlq		\$1,$Xi,$T2		# 2nd phase
	vpxor		$Xi,$Xhi,$Xhi
	vpxor		$T2,$Xi,$Xi		#
	vpsrlq		\$5,$T2,$T2
	vpxor		$T2,$Xi,$Xi		#
	vpsrlq		\$1,$Xi,$Xi		#
	vpxor		$Xhi,$Xi,$Xi		#
___
}

$code.=<<___;
.align	32
.Linit_loop_avx:
	vpalignr	\$8,$T1,$T2,$T3		# low part is H.lo^H.hi...
	vmovdqu		$T3,-0x10($Htbl)	# save Karatsuba "salt"
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^3,5,7
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
.Linit_start_avx:
	vmovdqa		$Xi,$T3
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^2,4,6,8
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
	vpshufd		\$0b01001110,$T3,$T1
	vpshufd		\$0b01001110,$Xi,$T2
	vpxor		$T3,$T1,$T1		# Karatsuba pre-processing
	vmovdqu		$T3,0x00($Htbl)		# save H^1,3,5,7
	vpxor		$Xi,$T2,$T2		# Karatsuba pre-processing
	vmovdqu		$Xi,0x10($Htbl)		# save H^2,4,6,8
	lea		0x30($Htbl),$Htbl
	sub		\$1,%r10
	jnz		.Linit_loop_avx

	vpalignr	\$8,$T2,$T1,$T3		# last "salt" is flipped
	vmovdqu		$T3,-0x10($Htbl)

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps		(%rsp),%xmm6
	lea		0x18(%rsp),%rsp
.LSEH_end_gcm_init_avx:
___
$code.=<<___;
	ret
.cfi_endproc
.size	gcm_init_avx,.-gcm_init_avx
___
} else {
$code.=<<___;
	jmp	.L_init_clmul
.size	gcm_init_avx,.-gcm_init_avx
___
}

$code.=<<___;
.globl	gcm_gmult_avx
.type	gcm_gmult_avx,\@abi-omnipotent
.align	32
gcm_gmult_avx:
.cfi_startproc
	jmp	.L_gmult_clmul
.cfi_endproc
.size	gcm_gmult_avx,.-gcm_gmult_avx
___

$code.=<<___;
.globl	gcm_ghash_avx
.type	gcm_ghash_avx,\@abi-omnipotent
.align	32
gcm_ghash_avx:
.cfi_startproc
___
if ($avx) {
my ($Xip,$Htbl,$inp,$len)=@_4args;
my ($Xlo,$Xhi,$Xmi,
    $Zlo,$Zhi,$Zmi,
    $Hkey,$HK,$T1,$T2,
    $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));

$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
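# The 8x code path below keeps three accumulator sets per batch
# (Xlo/Xhi for the lo*lo and hi*hi halves, Xmi for the Karatsuba
# middle terms, spilling into Zlo/Zhi/Zmi), multiplies eight input
# blocks by $Hkey^8..$Hkey^1 from $Htbl, and interleaves the previous
# batch's reduction (the two vpclmulqdq against .L0x1c2_polynomial via
# %r10) with the next batch's multiplications.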
$code.=<<___;
	vzeroupper

	vmovdqu	($Xip),$Xi		# load $Xi
	lea	.L0x1c2_polynomial(%rip),%r10
	lea	0x40($Htbl),$Htbl	# size optimization
	vmovdqu	.Lbswap_mask(%rip),$bswap
	vpshufb	$bswap,$Xi,$Xi
	cmp	\$0x80,$len
	jb	.Lshort_avx
	sub	\$0x80,$len

	vmovdqu	0x70($inp),$Ii		# I[7]
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpshufb	$bswap,$Ii,$Ii
	vmovdqu	0x20-0x40($Htbl),$HK

	vpunpckhqdq	$Ii,$Ii,$T2
	vmovdqu	0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Ii,$T2,$T2
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu	0x50($inp),$Ii		# I[5]
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1

	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpxor	$Ii,$T2,$T2
	vmovdqu	0x40($inp),$Ij		# I[4]
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x50-0x40($Htbl),$HK

	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1

	vmovdqu	0x30($inp),$Ii		# I[3]
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Zhi,$Xhi,$Xhi
	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpxor	$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x80-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2

	vmovdqu	0x20($inp),$Ij		# I[2]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpxor	$Xmi,$Zmi,$Zmi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1

	vmovdqu	0x10($inp),$Ii		# I[1]
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Zhi,$Xhi,$Xhi
	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpxor	$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0xb0-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2

	vmovdqu	($inp),$Ij		# I[0]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x10,$HK,$T2,$Xmi

	lea	0x80($inp),$inp
	cmp	\$0x80,$len
	jb	.Ltail_avx

	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi
	sub	\$0x80,$len
	jmp	.Loop8x_avx

.align	32
.Loop8x_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu	0x70($inp),$Ii		# I[7]
	vpxor	$Xlo,$Zlo,$Zlo
	vpxor	$Ij,$T1,$T1
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xi
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xo
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Tred
	vmovdqu	0x20-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2

	vmovdqu	0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Zlo,$Xi,$Xi		# collect result
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vxorps	$Zhi,$Xo,$Xo
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Zmi,$Tred,$Tred
	vxorps	$Ij,$T1,$T1

	vmovdqu	0x50($inp),$Ii		# I[5]
	vpxor	$Xi,$Tred,$Tred		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Xo,$Tred,$Tred
	vpslldq	\$8,$Tred,$T2
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vpsrldq	\$8,$Tred,$Tred
	vpxor	$T2,$Xi,$Xi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb	$bswap,$Ii,$Ii
	vxorps	$Tred,$Xo,$Xo
	vpxor	$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x50-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	0x40($inp),$Ij		# I[4]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 1st phase
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vxorps	$Ij,$T1,$T1
	vpxor	$Zmi,$Xmi,$Xmi

	vmovdqu	0x30($inp),$Ii		# I[3]
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x80-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	0x20($inp),$Ij		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor	$Ij,$T1,$T1
	vpxor	$Zmi,$Xmi,$Xmi
	vxorps	$Tred,$Xi,$Xi

	vmovdqu	0x10($inp),$Ii		# I[1]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 2nd phase
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vxorps	$Xo,$Tred,$Tred
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0xb0-0x40($Htbl),$HK
	vpxor	$Ii,$T2,$T2
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	($inp),$Ij		# I[0]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor	$Tred,$Ij,$Ij
	vpclmulqdq	\$0x10,$HK,$T2,$Xmi
	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi

	lea	0x80($inp),$inp
	sub	\$0x80,$len
	jnc	.Loop8x_avx

	add	\$0x80,$len
	jmp	.Ltail_no_xor_avx

.align	32
.Lshort_avx:
	vmovdqu	-0x10($inp,$len),$Ii	# very last word
	lea	($inp,$len),$inp
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vmovdqu	0x20-0x40($Htbl),$HK
	vpshufb	$bswap,$Ii,$Ij

	vmovdqa	$Xlo,$Zlo		# subtle way to zero $Zlo,
	vmovdqa	$Xhi,$Zhi		# $Zhi and
	vmovdqa	$Xmi,$Zmi		# $Zmi
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x20($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq	\$8,$HK,$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x30($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu	0x50-0x40($Htbl),$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x40($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq	\$8,$HK,$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x50($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu	0x80-0x40($Htbl),$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x60($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq	\$8,$HK,$HK
	sub	\$0x10,$len
	jz	.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vmovdqu	-0x70($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovq	0xb8-0x40($Htbl),$HK
	sub	\$0x10,$len
	jmp	.Ltail_avx

.align	32
.Ltail_avx:
	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi
.Ltail_no_xor_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Ij,$T1,$T1
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vmovdqu	(%r10),$Tred

	vpxor	$Xlo,$Zlo,$Xi
	vpxor	$Xhi,$Zhi,$Xo
	vpxor	$Xmi,$Zmi,$Zmi

	vpxor	$Xi,$Zmi,$Zmi		# aggregated Karatsuba post-processing
	vpxor	$Xo,$Zmi,$Zmi
	vpslldq	\$8,$Zmi,$T2
	vpsrldq	\$8,$Zmi,$Zmi
	vpxor	$T2,$Xi,$Xi
	vpxor	$Zmi,$Xo,$Xo

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 1st phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 2nd phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor	$Xo,$Xi,$Xi
	vpxor	$T2,$Xi,$Xi

	cmp	\$0,$len
	jne	.Lshort_avx

	vpshufb	$bswap,$Xi,$Xi
	vmovdqu	$Xi,($Xip)
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
	ret
.cfi_endproc
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
	jmp	.L_ghash_clmul
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
}

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
	.long	7,0,7,0
.L7_mask_poly:
	.long	7,0,`0xE1<<1`,0
.align	64
.type	.Lrem_4bit,\@object
.Lrem_4bit:
	.long	0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
	.long	0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
	.long	0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
	.long	0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type	.Lrem_8bit,\@object
.Lrem_8bit:
	.value	0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
	.value	0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
	.value	0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
	.value	0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
	.value	0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
	.value	0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
	.value	0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
	.value	0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
	.value	0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
	.value	0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
	.value	0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
	.value	0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
	.value	0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
	.value	0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
	.value	0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
	.value	0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
	.value	0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
	.value	0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
	.value	0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
	.value	0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
	.value	0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
	.value	0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
	.value	0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
	.value	0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
	.value	0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
	.value	0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
	.value	0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
	.value	0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
	.value	0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
	.value	0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
	.value	0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
	.value	0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE

.asciz	"GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	lea	48+280(%rax),%rax	# adjust "rsp"

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_gcm_gmult_4bit
	.rva	.LSEH_end_gcm_gmult_4bit
	.rva	.LSEH_info_gcm_gmult_4bit

	.rva	.LSEH_begin_gcm_ghash_4bit
	.rva	.LSEH_end_gcm_ghash_4bit
	.rva	.LSEH_info_gcm_ghash_4bit

	.rva	.LSEH_begin_gcm_init_clmul
	.rva	.LSEH_end_gcm_init_clmul
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_clmul
	.rva	.LSEH_end_gcm_ghash_clmul
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_gcm_init_avx
	.rva	.LSEH_end_gcm_init_avx
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_avx
	.rva	.LSEH_end_gcm_ghash_avx
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_gcm_gmult_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lgmult_prologue,.Lgmult_epilogue	# HandlerData
.LSEH_info_gcm_ghash_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lghash_prologue,.Lghash_epilogue	# HandlerData
.LSEH_info_gcm_init_clmul:
	.byte	0x01,0x08,0x03,0x00
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x22,0x00,0x00	#sub	rsp,0x18
.LSEH_info_gcm_ghash_clmul:
	.byte	0x01,0x33,0x16,0x00
	.byte	0x33,0xf8,0x09,0x00	#movaps	0x90(rsp),xmm15
	.byte	0x2e,0xe8,0x08,0x00	#movaps	0x80(rsp),xmm14
	.byte	0x29,0xd8,0x07,0x00	#movaps	0x70(rsp),xmm13
	.byte	0x24,0xc8,0x06,0x00	#movaps	0x60(rsp),xmm12
	.byte	0x1f,0xb8,0x05,0x00	#movaps	0x50(rsp),xmm11
	.byte	0x1a,0xa8,0x04,0x00	#movaps	0x40(rsp),xmm10
	.byte	0x15,0x98,0x03,0x00	#movaps	0x30(rsp),xmm9
	.byte	0x10,0x88,0x02,0x00	#movaps	0x20(rsp),xmm8
	.byte	0x0c,0x78,0x01,0x00	#movaps	0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x01,0x15,0x00	#sub	rsp,0xa8
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT or die "error closing STDOUT";