1#! /usr/bin/env perl
2# Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
3# Copyright (c) 2012, Intel Corporation. All Rights Reserved.
4#
5# Licensed under the OpenSSL license (the "License"). You may not use
6# this file except in compliance with the License. You can obtain a copy
7# in the file LICENSE in the source distribution or at
8# https://www.openssl.org/source/license.html
9#
10# Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
11# (1) Intel Corporation, Israel Development Center, Haifa, Israel
12# (2) University of Haifa, Israel
13#
14# References:
15# [1] S. Gueron, V. Krasnov: "Software Implementation of Modular
16# Exponentiation, Using Advanced Vector Instructions Architectures",
17# F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,
18# pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012
19# [2] S. Gueron: "Efficient Software Implementations of Modular
20# Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).
21# [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE
22# Proceedings of 9th International Conference on Information Technology:
23# New Generations (ITNG 2012), pp.821-823 (2012)
24# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis
25# resistant 1024-bit modular exponentiation, for optimizing RSA2048
26# on AVX2 capable x86_64 platforms",
27# http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest
28#
29# +13% improvement over original submission by <appro@openssl.org>
30#
31# rsa2048 sign/sec OpenSSL 1.0.1 scalar(*) this
32# 2.3GHz Haswell 621 765/+23% 1113/+79%
33# 2.3GHz Broadwell(**) 688 1200(***)/+74% 1120/+63%
34#
35# (*) if system doesn't support AVX2, for reference purposes;
36# (**) scaled to 2.3GHz to simplify comparison;
37# (***) scalar AD*X code is faster than AVX2 and is preferred code
38# path for Broadwell;
39
40$flavour = shift;
41$output = shift;
42if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
43
44$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
45
46$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
47( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
48( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
49die "can't locate x86_64-xlate.pl";
50
51# In upstream, this is controlled by shelling out to the compiler to check
52# versions, but BoringSSL is intended to be used with pre-generated perlasm
53# output, so this isn't useful anyway.
54#
55# TODO(davidben): Set $addx to one once build problems are resolved.
56$avx = 2;
57$addx = 0;
58
59open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
60*STDOUT = *OUT;
61
62if ($avx>1) {{{
63{ # void AMS_WW(
64my $rp="%rdi"; # BN_ULONG *rp,
65my $ap="%rsi"; # const BN_ULONG *ap,
66my $np="%rdx"; # const BN_ULONG *np,
67my $n0="%ecx"; # const BN_ULONG n0,
68my $rep="%r8d"; # int repeat);
69
70# The registers that hold the accumulated redundant result
71# The AMM works on 1024-bit operands, and the redundant word size is 29
72# Therefore: ceil(1024/29) = 36 digits, and 36/4 = 9 ymm registers
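# A minimal worked sketch of that register budget (illustrative only; the
# variables below are throwaway names, nothing in this file depends on them):
#
#	my $digits   = int((1024 + 28) / 29);	# ceil(1024/29) = 36 29-bit digits
#	my $acc_regs = $digits / 4;		# 4 digits per 256-bit ymm => 9
#
# hence $ACC0..$ACC8 carry the redundant result and $ACC9 is left over as a
# zero/scratch register.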
73my $ACC0="%ymm0";
74my $ACC1="%ymm1";
75my $ACC2="%ymm2";
76my $ACC3="%ymm3";
77my $ACC4="%ymm4";
78my $ACC5="%ymm5";
79my $ACC6="%ymm6";
80my $ACC7="%ymm7";
81my $ACC8="%ymm8";
82my $ACC9="%ymm9";
83# Registers that hold the broadcasted words of bp, currently used
84my $B1="%ymm10";
85my $B2="%ymm11";
86# Registers that hold the broadcasted words of Y, currently used
87my $Y1="%ymm12";
88my $Y2="%ymm13";
89# Helper registers
90my $TEMP1="%ymm14";
91my $AND_MASK="%ymm15";
92# alu registers that hold the first words of the ACC
93my $r0="%r9";
94my $r1="%r10";
95my $r2="%r11";
96my $r3="%r12";
97
98my $i="%r14d"; # loop counter
99my $tmp = "%r15";
100
101my $FrameSize=32*18+32*8; # place for A^2 and 2*A
102
103my $aap=$r0;
104my $tp0="%rbx";
105my $tp1=$r3;
106my $tpa=$tmp;
107
108$np="%r13"; # reassigned argument
109
110$code.=<<___;
111.text
112
113.globl rsaz_1024_sqr_avx2
114.type rsaz_1024_sqr_avx2,\@function,5
115.align 64
116rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2
117.cfi_startproc
118	lea	(%rsp), %rax
119.cfi_def_cfa_register	%rax
120	push	%rbx
121.cfi_push	%rbx
122	push	%rbp
123.cfi_push	%rbp
124	push	%r12
125.cfi_push	%r12
126	push	%r13
127.cfi_push	%r13
128	push	%r14
129.cfi_push	%r14
130	push	%r15
131.cfi_push	%r15
132	vzeroupper
133___
134$code.=<<___ if ($win64);
135 lea -0xa8(%rsp),%rsp
136 vmovaps %xmm6,-0xd8(%rax)
137 vmovaps %xmm7,-0xc8(%rax)
138 vmovaps %xmm8,-0xb8(%rax)
139 vmovaps %xmm9,-0xa8(%rax)
140 vmovaps %xmm10,-0x98(%rax)
141 vmovaps %xmm11,-0x88(%rax)
142 vmovaps %xmm12,-0x78(%rax)
143 vmovaps %xmm13,-0x68(%rax)
144 vmovaps %xmm14,-0x58(%rax)
145 vmovaps %xmm15,-0x48(%rax)
146.Lsqr_1024_body:
147___
148$code.=<<___;
149 mov %rax,%rbp
150.cfi_def_cfa_register	%rbp
151	mov	%rdx, $np			# reassigned argument
152 sub \$$FrameSize, %rsp
153 mov $np, $tmp
154 sub \$-128, $rp # size optimization
155 sub \$-128, $ap
156 sub \$-128, $np
157
158 and \$4095, $tmp # see if $np crosses page
159 add \$32*10, $tmp
160 shr \$12, $tmp
161 vpxor $ACC9,$ACC9,$ACC9
162 jz .Lsqr_1024_no_n_copy
163
164 # unaligned 256-bit load that crosses page boundary can
165 # cause >2x performance degradation here, so if $np does
166 # cross page boundary, copy it to stack and make sure stack
167 # frame doesn't...
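	# a rough worked example of the page-cross test above (comment only):
	#   tmp  = np & 4095          offset of np within its 4KB page
	#   tmp += 32*10 (= 0x140)    end of the 320-byte window we will touch
	#   tmp >>= 12                non-zero iff the window reaches the next page
	# e.g. offset 0xF80: (0xF80+0x140)>>12 = 1 -> copy below;
	#      offset 0x200: (0x200+0x140)>>12 = 0 -> .Lsqr_1024_no_n_copy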
168 sub \$32*10,%rsp
169 vmovdqu 32*0-128($np), $ACC0
170 and \$-2048, %rsp
171 vmovdqu 32*1-128($np), $ACC1
172 vmovdqu 32*2-128($np), $ACC2
173 vmovdqu 32*3-128($np), $ACC3
174 vmovdqu 32*4-128($np), $ACC4
175 vmovdqu 32*5-128($np), $ACC5
176 vmovdqu 32*6-128($np), $ACC6
177 vmovdqu 32*7-128($np), $ACC7
178 vmovdqu 32*8-128($np), $ACC8
179 lea $FrameSize+128(%rsp),$np
180 vmovdqu $ACC0, 32*0-128($np)
181 vmovdqu $ACC1, 32*1-128($np)
182 vmovdqu $ACC2, 32*2-128($np)
183 vmovdqu $ACC3, 32*3-128($np)
184 vmovdqu $ACC4, 32*4-128($np)
185 vmovdqu $ACC5, 32*5-128($np)
186 vmovdqu $ACC6, 32*6-128($np)
187 vmovdqu $ACC7, 32*7-128($np)
188 vmovdqu $ACC8, 32*8-128($np)
189 vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero
190
191.Lsqr_1024_no_n_copy:
192 and \$-1024, %rsp
193
194 vmovdqu 32*1-128($ap), $ACC1
195 vmovdqu 32*2-128($ap), $ACC2
196 vmovdqu 32*3-128($ap), $ACC3
197 vmovdqu 32*4-128($ap), $ACC4
198 vmovdqu 32*5-128($ap), $ACC5
199 vmovdqu 32*6-128($ap), $ACC6
200 vmovdqu 32*7-128($ap), $ACC7
201 vmovdqu 32*8-128($ap), $ACC8
202
203 lea 192(%rsp), $tp0 # 64+128=192
204	vmovdqu	.Land_mask(%rip), $AND_MASK
205	jmp	.LOOP_GRANDE_SQR_1024
206
207.align 32
208.LOOP_GRANDE_SQR_1024:
209 lea 32*18+128(%rsp), $aap # size optimization
210 lea 448(%rsp), $tp1 # 64+128+256=448
211
212 # the squaring is performed as described in Variant B of
213 # "Speeding up Big-Number Squaring", so start by calculating
214 # the A*2=A+A vector
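	# sketch of the identity behind Variant B (comment only): with
	#   A   = sum a_i * 2^(29*i),
	#   A^2 = sum_i a_i^2 * 2^(58*i) + sum_{i<j} a_i*(2*a_j) * 2^(29*(i+j))
	# so the doubled vector 2*A built here (stored at aap) lets each cross
	# product be taken exactly once, with the factor of two already folded in.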
215 vpaddq $ACC1, $ACC1, $ACC1
216 vpbroadcastq 32*0-128($ap), $B1
217 vpaddq $ACC2, $ACC2, $ACC2
218 vmovdqa $ACC1, 32*0-128($aap)
219 vpaddq $ACC3, $ACC3, $ACC3
220 vmovdqa $ACC2, 32*1-128($aap)
221 vpaddq $ACC4, $ACC4, $ACC4
222 vmovdqa $ACC3, 32*2-128($aap)
223 vpaddq $ACC5, $ACC5, $ACC5
224 vmovdqa $ACC4, 32*3-128($aap)
225 vpaddq $ACC6, $ACC6, $ACC6
226 vmovdqa $ACC5, 32*4-128($aap)
227 vpaddq $ACC7, $ACC7, $ACC7
228 vmovdqa $ACC6, 32*5-128($aap)
229 vpaddq $ACC8, $ACC8, $ACC8
230 vmovdqa $ACC7, 32*6-128($aap)
231 vpxor $ACC9, $ACC9, $ACC9
232 vmovdqa $ACC8, 32*7-128($aap)
233
234 vpmuludq 32*0-128($ap), $B1, $ACC0
235 vpbroadcastq 32*1-128($ap), $B2
236 vmovdqu $ACC9, 32*9-192($tp0) # zero upper half
237 vpmuludq $B1, $ACC1, $ACC1
238 vmovdqu $ACC9, 32*10-448($tp1)
239 vpmuludq $B1, $ACC2, $ACC2
240 vmovdqu $ACC9, 32*11-448($tp1)
241 vpmuludq $B1, $ACC3, $ACC3
242 vmovdqu $ACC9, 32*12-448($tp1)
243 vpmuludq $B1, $ACC4, $ACC4
244 vmovdqu $ACC9, 32*13-448($tp1)
245 vpmuludq $B1, $ACC5, $ACC5
246 vmovdqu $ACC9, 32*14-448($tp1)
247 vpmuludq $B1, $ACC6, $ACC6
248 vmovdqu $ACC9, 32*15-448($tp1)
249 vpmuludq $B1, $ACC7, $ACC7
250 vmovdqu $ACC9, 32*16-448($tp1)
251 vpmuludq $B1, $ACC8, $ACC8
252 vpbroadcastq 32*2-128($ap), $B1
253 vmovdqu $ACC9, 32*17-448($tp1)
254
255 mov $ap, $tpa
256 mov \$4, $i
257 jmp .Lsqr_entry_1024
258___
259$TEMP0=$Y1;
260$TEMP2=$Y2;
261$code.=<<___;
262.align 32
263.LOOP_SQR_1024:
264 vpbroadcastq 32*1-128($tpa), $B2
265 vpmuludq 32*0-128($ap), $B1, $ACC0
266 vpaddq 32*0-192($tp0), $ACC0, $ACC0
267 vpmuludq 32*0-128($aap), $B1, $ACC1
268 vpaddq 32*1-192($tp0), $ACC1, $ACC1
269 vpmuludq 32*1-128($aap), $B1, $ACC2
270 vpaddq 32*2-192($tp0), $ACC2, $ACC2
271 vpmuludq 32*2-128($aap), $B1, $ACC3
272 vpaddq 32*3-192($tp0), $ACC3, $ACC3
273 vpmuludq 32*3-128($aap), $B1, $ACC4
274 vpaddq 32*4-192($tp0), $ACC4, $ACC4
275 vpmuludq 32*4-128($aap), $B1, $ACC5
276 vpaddq 32*5-192($tp0), $ACC5, $ACC5
277 vpmuludq 32*5-128($aap), $B1, $ACC6
278 vpaddq 32*6-192($tp0), $ACC6, $ACC6
279 vpmuludq 32*6-128($aap), $B1, $ACC7
280 vpaddq 32*7-192($tp0), $ACC7, $ACC7
281 vpmuludq 32*7-128($aap), $B1, $ACC8
282 vpbroadcastq 32*2-128($tpa), $B1
283 vpaddq 32*8-192($tp0), $ACC8, $ACC8
284.Lsqr_entry_1024:
285 vmovdqu $ACC0, 32*0-192($tp0)
286 vmovdqu $ACC1, 32*1-192($tp0)
287
288 vpmuludq 32*1-128($ap), $B2, $TEMP0
289 vpaddq $TEMP0, $ACC2, $ACC2
290 vpmuludq 32*1-128($aap), $B2, $TEMP1
291 vpaddq $TEMP1, $ACC3, $ACC3
292 vpmuludq 32*2-128($aap), $B2, $TEMP2
293 vpaddq $TEMP2, $ACC4, $ACC4
294 vpmuludq 32*3-128($aap), $B2, $TEMP0
295 vpaddq $TEMP0, $ACC5, $ACC5
296 vpmuludq 32*4-128($aap), $B2, $TEMP1
297 vpaddq $TEMP1, $ACC6, $ACC6
298 vpmuludq 32*5-128($aap), $B2, $TEMP2
299 vpaddq $TEMP2, $ACC7, $ACC7
300 vpmuludq 32*6-128($aap), $B2, $TEMP0
301 vpaddq $TEMP0, $ACC8, $ACC8
302 vpmuludq 32*7-128($aap), $B2, $ACC0
303 vpbroadcastq 32*3-128($tpa), $B2
304 vpaddq 32*9-192($tp0), $ACC0, $ACC0
305
306 vmovdqu $ACC2, 32*2-192($tp0)
307 vmovdqu $ACC3, 32*3-192($tp0)
308
309 vpmuludq 32*2-128($ap), $B1, $TEMP2
310 vpaddq $TEMP2, $ACC4, $ACC4
311 vpmuludq 32*2-128($aap), $B1, $TEMP0
312 vpaddq $TEMP0, $ACC5, $ACC5
313 vpmuludq 32*3-128($aap), $B1, $TEMP1
314 vpaddq $TEMP1, $ACC6, $ACC6
315 vpmuludq 32*4-128($aap), $B1, $TEMP2
316 vpaddq $TEMP2, $ACC7, $ACC7
317 vpmuludq 32*5-128($aap), $B1, $TEMP0
318 vpaddq $TEMP0, $ACC8, $ACC8
319 vpmuludq 32*6-128($aap), $B1, $TEMP1
320 vpaddq $TEMP1, $ACC0, $ACC0
321 vpmuludq 32*7-128($aap), $B1, $ACC1
322 vpbroadcastq 32*4-128($tpa), $B1
323 vpaddq 32*10-448($tp1), $ACC1, $ACC1
324
325 vmovdqu $ACC4, 32*4-192($tp0)
326 vmovdqu $ACC5, 32*5-192($tp0)
327
328 vpmuludq 32*3-128($ap), $B2, $TEMP0
329 vpaddq $TEMP0, $ACC6, $ACC6
330 vpmuludq 32*3-128($aap), $B2, $TEMP1
331 vpaddq $TEMP1, $ACC7, $ACC7
332 vpmuludq 32*4-128($aap), $B2, $TEMP2
333 vpaddq $TEMP2, $ACC8, $ACC8
334 vpmuludq 32*5-128($aap), $B2, $TEMP0
335 vpaddq $TEMP0, $ACC0, $ACC0
336 vpmuludq 32*6-128($aap), $B2, $TEMP1
337 vpaddq $TEMP1, $ACC1, $ACC1
338 vpmuludq 32*7-128($aap), $B2, $ACC2
339 vpbroadcastq 32*5-128($tpa), $B2
340	vpaddq	32*11-448($tp1), $ACC2, $ACC2
341
342 vmovdqu $ACC6, 32*6-192($tp0)
343 vmovdqu $ACC7, 32*7-192($tp0)
344
345 vpmuludq 32*4-128($ap), $B1, $TEMP0
346 vpaddq $TEMP0, $ACC8, $ACC8
347 vpmuludq 32*4-128($aap), $B1, $TEMP1
348 vpaddq $TEMP1, $ACC0, $ACC0
349 vpmuludq 32*5-128($aap), $B1, $TEMP2
350 vpaddq $TEMP2, $ACC1, $ACC1
351 vpmuludq 32*6-128($aap), $B1, $TEMP0
352 vpaddq $TEMP0, $ACC2, $ACC2
353 vpmuludq 32*7-128($aap), $B1, $ACC3
354 vpbroadcastq 32*6-128($tpa), $B1
355 vpaddq 32*12-448($tp1), $ACC3, $ACC3
356
357 vmovdqu $ACC8, 32*8-192($tp0)
358 vmovdqu $ACC0, 32*9-192($tp0)
359 lea 8($tp0), $tp0
360
361 vpmuludq 32*5-128($ap), $B2, $TEMP2
362 vpaddq $TEMP2, $ACC1, $ACC1
363 vpmuludq 32*5-128($aap), $B2, $TEMP0
364 vpaddq $TEMP0, $ACC2, $ACC2
365 vpmuludq 32*6-128($aap), $B2, $TEMP1
366 vpaddq $TEMP1, $ACC3, $ACC3
367 vpmuludq 32*7-128($aap), $B2, $ACC4
368 vpbroadcastq 32*7-128($tpa), $B2
369 vpaddq 32*13-448($tp1), $ACC4, $ACC4
370
371 vmovdqu $ACC1, 32*10-448($tp1)
372 vmovdqu $ACC2, 32*11-448($tp1)
373
374 vpmuludq 32*6-128($ap), $B1, $TEMP0
375 vpaddq $TEMP0, $ACC3, $ACC3
376 vpmuludq 32*6-128($aap), $B1, $TEMP1
377 vpbroadcastq 32*8-128($tpa), $ACC0 # borrow $ACC0 for $B1
378 vpaddq $TEMP1, $ACC4, $ACC4
379 vpmuludq 32*7-128($aap), $B1, $ACC5
380 vpbroadcastq 32*0+8-128($tpa), $B1 # for next iteration
381 vpaddq 32*14-448($tp1), $ACC5, $ACC5
382
383 vmovdqu $ACC3, 32*12-448($tp1)
384 vmovdqu $ACC4, 32*13-448($tp1)
385 lea 8($tpa), $tpa
386
387 vpmuludq 32*7-128($ap), $B2, $TEMP0
388 vpaddq $TEMP0, $ACC5, $ACC5
389 vpmuludq 32*7-128($aap), $B2, $ACC6
390 vpaddq 32*15-448($tp1), $ACC6, $ACC6
391
392 vpmuludq 32*8-128($ap), $ACC0, $ACC7
393 vmovdqu $ACC5, 32*14-448($tp1)
394 vpaddq 32*16-448($tp1), $ACC7, $ACC7
395 vmovdqu $ACC6, 32*15-448($tp1)
396 vmovdqu $ACC7, 32*16-448($tp1)
397 lea 8($tp1), $tp1
398
399	dec	$i
400	jnz	.LOOP_SQR_1024
401___
402$ZERO = $ACC9;
403$TEMP0 = $B1;
404$TEMP2 = $B2;
405$TEMP3 = $Y1;
406$TEMP4 = $Y2;
407$code.=<<___;
408	# we need to fix indices 32-39 to avoid overflow
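	# rough sketch of the carry fix-up used below (comment only): per digit
	#   carry = digit >> 29, digit &= 0x1fffffff      (vpsrlq / vpand)
	# vpermq 0x93 then rotates the four carries up one 64-bit lane (top lane
	# wrapping to lane 0), and the vpblendd 3 pair hands that wrapped carry
	# to the following accumulator instead of back to this one.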
409	vmovdqu	32*8(%rsp), $ACC8		# 32*8-192($tp0),
410 vmovdqu 32*9(%rsp), $ACC1 # 32*9-192($tp0)
411 vmovdqu 32*10(%rsp), $ACC2 # 32*10-192($tp0)
412 lea 192(%rsp), $tp0 # 64+128=192
413
414 vpsrlq \$29, $ACC8, $TEMP1
415 vpand $AND_MASK, $ACC8, $ACC8
416 vpsrlq \$29, $ACC1, $TEMP2
417 vpand $AND_MASK, $ACC1, $ACC1
418
419 vpermq \$0x93, $TEMP1, $TEMP1
420 vpxor $ZERO, $ZERO, $ZERO
421 vpermq \$0x93, $TEMP2, $TEMP2
422
423 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
424 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
425 vpaddq $TEMP0, $ACC8, $ACC8
426 vpblendd \$3, $TEMP2, $ZERO, $TEMP2
427 vpaddq $TEMP1, $ACC1, $ACC1
428 vpaddq $TEMP2, $ACC2, $ACC2
429 vmovdqu $ACC1, 32*9-192($tp0)
430 vmovdqu $ACC2, 32*10-192($tp0)
431
432 mov (%rsp), %rax
433 mov 8(%rsp), $r1
434 mov 16(%rsp), $r2
435 mov 24(%rsp), $r3
436 vmovdqu 32*1(%rsp), $ACC1
437 vmovdqu 32*2-192($tp0), $ACC2
438 vmovdqu 32*3-192($tp0), $ACC3
439 vmovdqu 32*4-192($tp0), $ACC4
440 vmovdqu 32*5-192($tp0), $ACC5
441 vmovdqu 32*6-192($tp0), $ACC6
442 vmovdqu 32*7-192($tp0), $ACC7
443
444 mov %rax, $r0
445 imull $n0, %eax
446 and \$0x1fffffff, %eax
447 vmovd %eax, $Y1
448
449 mov %rax, %rdx
450 imulq -128($np), %rax
451 vpbroadcastq $Y1, $Y1
452 add %rax, $r0
453 mov %rdx, %rax
454 imulq 8-128($np), %rax
455 shr \$29, $r0
456 add %rax, $r1
457 mov %rdx, %rax
458 imulq 16-128($np), %rax
459 add $r0, $r1
460 add %rax, $r2
461 imulq 24-128($np), %rdx
462 add %rdx, $r3
463
464 mov $r1, %rax
465 imull $n0, %eax
466 and \$0x1fffffff, %eax
467
468 mov \$9, $i
469 jmp .LOOP_REDUCE_1024
470
471.align 32
472.LOOP_REDUCE_1024:
473 vmovd %eax, $Y2
474 vpbroadcastq $Y2, $Y2
475
476 vpmuludq 32*1-128($np), $Y1, $TEMP0
477 mov %rax, %rdx
478 imulq -128($np), %rax
479 vpaddq $TEMP0, $ACC1, $ACC1
480 add %rax, $r1
481 vpmuludq 32*2-128($np), $Y1, $TEMP1
482 mov %rdx, %rax
483 imulq 8-128($np), %rax
484 vpaddq $TEMP1, $ACC2, $ACC2
485 vpmuludq 32*3-128($np), $Y1, $TEMP2
486 .byte 0x67
487 add %rax, $r2
488 .byte 0x67
489 mov %rdx, %rax
490 imulq 16-128($np), %rax
491 shr \$29, $r1
492 vpaddq $TEMP2, $ACC3, $ACC3
493 vpmuludq 32*4-128($np), $Y1, $TEMP0
494 add %rax, $r3
495 add $r1, $r2
496 vpaddq $TEMP0, $ACC4, $ACC4
497 vpmuludq 32*5-128($np), $Y1, $TEMP1
498 mov $r2, %rax
499 imull $n0, %eax
500 vpaddq $TEMP1, $ACC5, $ACC5
501 vpmuludq 32*6-128($np), $Y1, $TEMP2
502 and \$0x1fffffff, %eax
503 vpaddq $TEMP2, $ACC6, $ACC6
504 vpmuludq 32*7-128($np), $Y1, $TEMP0
505 vpaddq $TEMP0, $ACC7, $ACC7
506 vpmuludq 32*8-128($np), $Y1, $TEMP1
507 vmovd %eax, $Y1
508 #vmovdqu 32*1-8-128($np), $TEMP2 # moved below
509 vpaddq $TEMP1, $ACC8, $ACC8
510 #vmovdqu 32*2-8-128($np), $TEMP0 # moved below
511 vpbroadcastq $Y1, $Y1
512
513 vpmuludq 32*1-8-128($np), $Y2, $TEMP2 # see above
514 vmovdqu 32*3-8-128($np), $TEMP1
515 mov %rax, %rdx
516 imulq -128($np), %rax
517 vpaddq $TEMP2, $ACC1, $ACC1
518 vpmuludq 32*2-8-128($np), $Y2, $TEMP0 # see above
519 vmovdqu 32*4-8-128($np), $TEMP2
520 add %rax, $r2
521 mov %rdx, %rax
522 imulq 8-128($np), %rax
523 vpaddq $TEMP0, $ACC2, $ACC2
524 add $r3, %rax
525 shr \$29, $r2
526 vpmuludq $Y2, $TEMP1, $TEMP1
527 vmovdqu 32*5-8-128($np), $TEMP0
528 add $r2, %rax
529 vpaddq $TEMP1, $ACC3, $ACC3
530 vpmuludq $Y2, $TEMP2, $TEMP2
531 vmovdqu 32*6-8-128($np), $TEMP1
532 .byte 0x67
533 mov %rax, $r3
534 imull $n0, %eax
535 vpaddq $TEMP2, $ACC4, $ACC4
536 vpmuludq $Y2, $TEMP0, $TEMP0
537 .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 # vmovdqu 32*7-8-128($np), $TEMP2
538 and \$0x1fffffff, %eax
539 vpaddq $TEMP0, $ACC5, $ACC5
540 vpmuludq $Y2, $TEMP1, $TEMP1
541 vmovdqu 32*8-8-128($np), $TEMP0
542 vpaddq $TEMP1, $ACC6, $ACC6
543 vpmuludq $Y2, $TEMP2, $TEMP2
544 vmovdqu 32*9-8-128($np), $ACC9
545 vmovd %eax, $ACC0 # borrow ACC0 for Y2
546 imulq -128($np), %rax
547 vpaddq $TEMP2, $ACC7, $ACC7
548 vpmuludq $Y2, $TEMP0, $TEMP0
549 vmovdqu 32*1-16-128($np), $TEMP1
550 vpbroadcastq $ACC0, $ACC0
551 vpaddq $TEMP0, $ACC8, $ACC8
552 vpmuludq $Y2, $ACC9, $ACC9
553 vmovdqu 32*2-16-128($np), $TEMP2
554 add %rax, $r3
555
556___
557($ACC0,$Y2)=($Y2,$ACC0);
558$code.=<<___;
559 vmovdqu 32*1-24-128($np), $ACC0
560 vpmuludq $Y1, $TEMP1, $TEMP1
561 vmovdqu 32*3-16-128($np), $TEMP0
562 vpaddq $TEMP1, $ACC1, $ACC1
563 vpmuludq $Y2, $ACC0, $ACC0
564 vpmuludq $Y1, $TEMP2, $TEMP2
565 .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff # vmovdqu 32*4-16-128($np), $TEMP1
566 vpaddq $ACC1, $ACC0, $ACC0
567 vpaddq $TEMP2, $ACC2, $ACC2
568 vpmuludq $Y1, $TEMP0, $TEMP0
569 vmovdqu 32*5-16-128($np), $TEMP2
570 .byte 0x67
571 vmovq $ACC0, %rax
572 vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
573 vpaddq $TEMP0, $ACC3, $ACC3
574 vpmuludq $Y1, $TEMP1, $TEMP1
575 vmovdqu 32*6-16-128($np), $TEMP0
576 vpaddq $TEMP1, $ACC4, $ACC4
577 vpmuludq $Y1, $TEMP2, $TEMP2
578 vmovdqu 32*7-16-128($np), $TEMP1
579 vpaddq $TEMP2, $ACC5, $ACC5
580 vpmuludq $Y1, $TEMP0, $TEMP0
581 vmovdqu 32*8-16-128($np), $TEMP2
582 vpaddq $TEMP0, $ACC6, $ACC6
583 vpmuludq $Y1, $TEMP1, $TEMP1
584 shr \$29, $r3
585 vmovdqu 32*9-16-128($np), $TEMP0
586 add $r3, %rax
587 vpaddq $TEMP1, $ACC7, $ACC7
588 vpmuludq $Y1, $TEMP2, $TEMP2
589 #vmovdqu 32*2-24-128($np), $TEMP1 # moved below
590 mov %rax, $r0
591 imull $n0, %eax
592 vpaddq $TEMP2, $ACC8, $ACC8
593 vpmuludq $Y1, $TEMP0, $TEMP0
594 and \$0x1fffffff, %eax
595 vmovd %eax, $Y1
596 vmovdqu 32*3-24-128($np), $TEMP2
597 .byte 0x67
598 vpaddq $TEMP0, $ACC9, $ACC9
599 vpbroadcastq $Y1, $Y1
600
601 vpmuludq 32*2-24-128($np), $Y2, $TEMP1 # see above
602 vmovdqu 32*4-24-128($np), $TEMP0
603 mov %rax, %rdx
604 imulq -128($np), %rax
605 mov 8(%rsp), $r1
606 vpaddq $TEMP1, $ACC2, $ACC1
607 vpmuludq $Y2, $TEMP2, $TEMP2
608 vmovdqu 32*5-24-128($np), $TEMP1
609 add %rax, $r0
610 mov %rdx, %rax
611 imulq 8-128($np), %rax
612 .byte 0x67
613 shr \$29, $r0
614 mov 16(%rsp), $r2
615 vpaddq $TEMP2, $ACC3, $ACC2
616 vpmuludq $Y2, $TEMP0, $TEMP0
617 vmovdqu 32*6-24-128($np), $TEMP2
618 add %rax, $r1
619 mov %rdx, %rax
620 imulq 16-128($np), %rax
621 vpaddq $TEMP0, $ACC4, $ACC3
622 vpmuludq $Y2, $TEMP1, $TEMP1
623 vmovdqu 32*7-24-128($np), $TEMP0
624 imulq 24-128($np), %rdx # future $r3
625 add %rax, $r2
626 lea ($r0,$r1), %rax
627 vpaddq $TEMP1, $ACC5, $ACC4
628 vpmuludq $Y2, $TEMP2, $TEMP2
629 vmovdqu 32*8-24-128($np), $TEMP1
630 mov %rax, $r1
631 imull $n0, %eax
632 vpmuludq $Y2, $TEMP0, $TEMP0
633 vpaddq $TEMP2, $ACC6, $ACC5
634 vmovdqu 32*9-24-128($np), $TEMP2
635 and \$0x1fffffff, %eax
636 vpaddq $TEMP0, $ACC7, $ACC6
637 vpmuludq $Y2, $TEMP1, $TEMP1
638 add 24(%rsp), %rdx
639 vpaddq $TEMP1, $ACC8, $ACC7
640 vpmuludq $Y2, $TEMP2, $TEMP2
641 vpaddq $TEMP2, $ACC9, $ACC8
642 vmovq $r3, $ACC9
643 mov %rdx, $r3
644
645 dec $i
646 jnz .LOOP_REDUCE_1024
647___
648($ACC0,$Y2)=($Y2,$ACC0);
649$code.=<<___;
650 lea 448(%rsp), $tp1 # size optimization
651 vpaddq $ACC9, $Y2, $ACC0
652 vpxor $ZERO, $ZERO, $ZERO
653
654 vpaddq 32*9-192($tp0), $ACC0, $ACC0
655 vpaddq 32*10-448($tp1), $ACC1, $ACC1
656 vpaddq 32*11-448($tp1), $ACC2, $ACC2
657 vpaddq 32*12-448($tp1), $ACC3, $ACC3
658 vpaddq 32*13-448($tp1), $ACC4, $ACC4
659 vpaddq 32*14-448($tp1), $ACC5, $ACC5
660 vpaddq 32*15-448($tp1), $ACC6, $ACC6
661 vpaddq 32*16-448($tp1), $ACC7, $ACC7
662 vpaddq 32*17-448($tp1), $ACC8, $ACC8
663
664 vpsrlq \$29, $ACC0, $TEMP1
665 vpand $AND_MASK, $ACC0, $ACC0
666 vpsrlq \$29, $ACC1, $TEMP2
667 vpand $AND_MASK, $ACC1, $ACC1
668 vpsrlq \$29, $ACC2, $TEMP3
669 vpermq \$0x93, $TEMP1, $TEMP1
670 vpand $AND_MASK, $ACC2, $ACC2
671 vpsrlq \$29, $ACC3, $TEMP4
672 vpermq \$0x93, $TEMP2, $TEMP2
673 vpand $AND_MASK, $ACC3, $ACC3
674 vpermq \$0x93, $TEMP3, $TEMP3
675
676 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
677 vpermq \$0x93, $TEMP4, $TEMP4
678 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
679 vpaddq $TEMP0, $ACC0, $ACC0
680 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
681 vpaddq $TEMP1, $ACC1, $ACC1
682 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
683 vpaddq $TEMP2, $ACC2, $ACC2
684 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
685 vpaddq $TEMP3, $ACC3, $ACC3
686 vpaddq $TEMP4, $ACC4, $ACC4
687
688 vpsrlq \$29, $ACC0, $TEMP1
689 vpand $AND_MASK, $ACC0, $ACC0
690 vpsrlq \$29, $ACC1, $TEMP2
691 vpand $AND_MASK, $ACC1, $ACC1
692 vpsrlq \$29, $ACC2, $TEMP3
693 vpermq \$0x93, $TEMP1, $TEMP1
694 vpand $AND_MASK, $ACC2, $ACC2
695 vpsrlq \$29, $ACC3, $TEMP4
696 vpermq \$0x93, $TEMP2, $TEMP2
697 vpand $AND_MASK, $ACC3, $ACC3
698 vpermq \$0x93, $TEMP3, $TEMP3
699
700 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
701 vpermq \$0x93, $TEMP4, $TEMP4
702 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
703 vpaddq $TEMP0, $ACC0, $ACC0
704 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
705 vpaddq $TEMP1, $ACC1, $ACC1
706 vmovdqu $ACC0, 32*0-128($rp)
707 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
708 vpaddq $TEMP2, $ACC2, $ACC2
709 vmovdqu $ACC1, 32*1-128($rp)
710 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
711 vpaddq $TEMP3, $ACC3, $ACC3
712 vmovdqu $ACC2, 32*2-128($rp)
713 vpaddq $TEMP4, $ACC4, $ACC4
714 vmovdqu $ACC3, 32*3-128($rp)
715___
716$TEMP5=$ACC0;
717$code.=<<___;
718 vpsrlq \$29, $ACC4, $TEMP1
719 vpand $AND_MASK, $ACC4, $ACC4
720 vpsrlq \$29, $ACC5, $TEMP2
721 vpand $AND_MASK, $ACC5, $ACC5
722 vpsrlq \$29, $ACC6, $TEMP3
723 vpermq \$0x93, $TEMP1, $TEMP1
724 vpand $AND_MASK, $ACC6, $ACC6
725 vpsrlq \$29, $ACC7, $TEMP4
726 vpermq \$0x93, $TEMP2, $TEMP2
727 vpand $AND_MASK, $ACC7, $ACC7
728 vpsrlq \$29, $ACC8, $TEMP5
729 vpermq \$0x93, $TEMP3, $TEMP3
730 vpand $AND_MASK, $ACC8, $ACC8
731 vpermq \$0x93, $TEMP4, $TEMP4
732
733 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
734 vpermq \$0x93, $TEMP5, $TEMP5
735 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
736 vpaddq $TEMP0, $ACC4, $ACC4
737 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
738 vpaddq $TEMP1, $ACC5, $ACC5
739 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
740 vpaddq $TEMP2, $ACC6, $ACC6
741 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
742 vpaddq $TEMP3, $ACC7, $ACC7
743 vpaddq $TEMP4, $ACC8, $ACC8
744
745	vpsrlq	\$29, $ACC4, $TEMP1
746 vpand $AND_MASK, $ACC4, $ACC4
747 vpsrlq \$29, $ACC5, $TEMP2
748 vpand $AND_MASK, $ACC5, $ACC5
749 vpsrlq \$29, $ACC6, $TEMP3
750 vpermq \$0x93, $TEMP1, $TEMP1
751 vpand $AND_MASK, $ACC6, $ACC6
752 vpsrlq \$29, $ACC7, $TEMP4
753 vpermq \$0x93, $TEMP2, $TEMP2
754 vpand $AND_MASK, $ACC7, $ACC7
755 vpsrlq \$29, $ACC8, $TEMP5
756 vpermq \$0x93, $TEMP3, $TEMP3
757 vpand $AND_MASK, $ACC8, $ACC8
758 vpermq \$0x93, $TEMP4, $TEMP4
759
760 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
761 vpermq \$0x93, $TEMP5, $TEMP5
762 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
763 vpaddq $TEMP0, $ACC4, $ACC4
764 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
765 vpaddq $TEMP1, $ACC5, $ACC5
766 vmovdqu $ACC4, 32*4-128($rp)
767 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
768 vpaddq $TEMP2, $ACC6, $ACC6
769 vmovdqu $ACC5, 32*5-128($rp)
770 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
771 vpaddq $TEMP3, $ACC7, $ACC7
772 vmovdqu $ACC6, 32*6-128($rp)
773 vpaddq $TEMP4, $ACC8, $ACC8
774 vmovdqu $ACC7, 32*7-128($rp)
775 vmovdqu $ACC8, 32*8-128($rp)
776
777 mov $rp, $ap
778 dec $rep
779 jne .LOOP_GRANDE_SQR_1024
780
781 vzeroall
782 mov %rbp, %rax
783.cfi_def_cfa_register	%rax
784___
785$code.=<<___ if ($win64);
786.Lsqr_1024_in_tail:
787	movaps	-0xd8(%rax),%xmm6
788 movaps -0xc8(%rax),%xmm7
789 movaps -0xb8(%rax),%xmm8
790 movaps -0xa8(%rax),%xmm9
791 movaps -0x98(%rax),%xmm10
792 movaps -0x88(%rax),%xmm11
793 movaps -0x78(%rax),%xmm12
794 movaps -0x68(%rax),%xmm13
795 movaps -0x58(%rax),%xmm14
796 movaps -0x48(%rax),%xmm15
797___
798$code.=<<___;
799 mov -48(%rax),%r15
800.cfi_restore	%r15
801	mov	-40(%rax),%r14
802.cfi_restore	%r14
803	mov	-32(%rax),%r13
804.cfi_restore	%r13
805	mov	-24(%rax),%r12
806.cfi_restore	%r12
807	mov	-16(%rax),%rbp
808.cfi_restore	%rbp
809	mov	-8(%rax),%rbx
810.cfi_restore	%rbx
811	lea	(%rax),%rsp		# restore %rsp
812.cfi_def_cfa_register	%rsp
813.Lsqr_1024_epilogue:
814	ret
815.cfi_endproc
816.size	rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
817___
818}
819
820{ # void AMM_WW(
821my $rp="%rdi"; # BN_ULONG *rp,
822my $ap="%rsi"; # const BN_ULONG *ap,
823my $bp="%rdx"; # const BN_ULONG *bp,
824my $np="%rcx"; # const BN_ULONG *np,
825my $n0="%r8d"; # unsigned int n0);
826
827# The registers that hold the accumulated redundant result
828# The AMM works on 1024-bit operands, and the redundant word size is 29
829# Therefore: ceil(1024/29) = 36 digits, and 36/4 = 9 ymm registers
830my $ACC0="%ymm0";
831my $ACC1="%ymm1";
832my $ACC2="%ymm2";
833my $ACC3="%ymm3";
834my $ACC4="%ymm4";
835my $ACC5="%ymm5";
836my $ACC6="%ymm6";
837my $ACC7="%ymm7";
838my $ACC8="%ymm8";
839my $ACC9="%ymm9";
840
841# Registers that hold the broadcasted words of multiplier, currently used
842my $Bi="%ymm10";
843my $Yi="%ymm11";
844
845# Helper registers
846my $TEMP0=$ACC0;
847my $TEMP1="%ymm12";
848my $TEMP2="%ymm13";
849my $ZERO="%ymm14";
850my $AND_MASK="%ymm15";
851
852# alu registers that hold the first words of the ACC
853my $r0="%r9";
854my $r1="%r10";
855my $r2="%r11";
856my $r3="%r12";
857
858my $i="%r14d";
859my $tmp="%r15";
860
861$bp="%r13"; # reassigned argument
862
863$code.=<<___;
864.globl rsaz_1024_mul_avx2
865.type rsaz_1024_mul_avx2,\@function,5
866.align 64
867rsaz_1024_mul_avx2:
868.cfi_startproc
869	lea	(%rsp), %rax
870.cfi_def_cfa_register	%rax
871	push	%rbx
872.cfi_push	%rbx
873	push	%rbp
874.cfi_push	%rbp
875	push	%r12
876.cfi_push	%r12
877	push	%r13
878.cfi_push	%r13
879	push	%r14
880.cfi_push	%r14
881	push	%r15
882.cfi_push	%r15
883___
884$code.=<<___ if ($win64);
885 vzeroupper
886 lea -0xa8(%rsp),%rsp
887 vmovaps %xmm6,-0xd8(%rax)
888 vmovaps %xmm7,-0xc8(%rax)
889 vmovaps %xmm8,-0xb8(%rax)
890 vmovaps %xmm9,-0xa8(%rax)
891 vmovaps %xmm10,-0x98(%rax)
892 vmovaps %xmm11,-0x88(%rax)
893 vmovaps %xmm12,-0x78(%rax)
894 vmovaps %xmm13,-0x68(%rax)
895 vmovaps %xmm14,-0x58(%rax)
896 vmovaps %xmm15,-0x48(%rax)
897.Lmul_1024_body:
898___
899$code.=<<___;
900 mov %rax,%rbp
901.cfi_def_cfa_register	%rbp
902	vzeroall
903 mov %rdx, $bp # reassigned argument
904 sub \$64,%rsp
905
906 # unaligned 256-bit load that crosses page boundary can
907 # cause severe performance degradation here, so if $ap does
908 # cross page boundary, swap it with $bp [meaning that caller
909 # is advised to lay down $ap and $bp next to each other, so
910 # that only one can cross page boundary].
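	# the swap below is branchless (comment only): the same page-cross test
	# as in the squaring path leaves ZF clear iff ap's 320-byte window spills
	# into the next 4KB page; in that case the two cmovnz exchange ap and bp,
	# so the crossing pointer is only read with 8-byte vpbroadcastq loads
	# rather than full 256-bit loads.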
911 .byte 0x67,0x67
912 mov $ap, $tmp
913 and \$4095, $tmp
914 add \$32*10, $tmp
915 shr \$12, $tmp
916 mov $ap, $tmp
917 cmovnz $bp, $ap
918 cmovnz $tmp, $bp
919
920 mov $np, $tmp
921 sub \$-128,$ap # size optimization
922 sub \$-128,$np
923 sub \$-128,$rp
924
925 and \$4095, $tmp # see if $np crosses page
926 add \$32*10, $tmp
927 .byte 0x67,0x67
928 shr \$12, $tmp
929 jz .Lmul_1024_no_n_copy
930
931 # unaligned 256-bit load that crosses page boundary can
932 # cause severe performance degradation here, so if $np does
933 # cross page boundary, copy it to stack and make sure stack
934 # frame doesn't...
935 sub \$32*10,%rsp
936 vmovdqu 32*0-128($np), $ACC0
937 and \$-512, %rsp
938 vmovdqu 32*1-128($np), $ACC1
939 vmovdqu 32*2-128($np), $ACC2
940 vmovdqu 32*3-128($np), $ACC3
941 vmovdqu 32*4-128($np), $ACC4
942 vmovdqu 32*5-128($np), $ACC5
943 vmovdqu 32*6-128($np), $ACC6
944 vmovdqu 32*7-128($np), $ACC7
945 vmovdqu 32*8-128($np), $ACC8
946 lea 64+128(%rsp),$np
947 vmovdqu $ACC0, 32*0-128($np)
948 vpxor $ACC0, $ACC0, $ACC0
949 vmovdqu $ACC1, 32*1-128($np)
950 vpxor $ACC1, $ACC1, $ACC1
951 vmovdqu $ACC2, 32*2-128($np)
952 vpxor $ACC2, $ACC2, $ACC2
953 vmovdqu $ACC3, 32*3-128($np)
954 vpxor $ACC3, $ACC3, $ACC3
955 vmovdqu $ACC4, 32*4-128($np)
956 vpxor $ACC4, $ACC4, $ACC4
957 vmovdqu $ACC5, 32*5-128($np)
958 vpxor $ACC5, $ACC5, $ACC5
959 vmovdqu $ACC6, 32*6-128($np)
960 vpxor $ACC6, $ACC6, $ACC6
961 vmovdqu $ACC7, 32*7-128($np)
962 vpxor $ACC7, $ACC7, $ACC7
963 vmovdqu $ACC8, 32*8-128($np)
964 vmovdqa $ACC0, $ACC8
965 vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero after vzeroall
966.Lmul_1024_no_n_copy:
967 and \$-64,%rsp
968
969 mov ($bp), %rbx
970 vpbroadcastq ($bp), $Bi
971 vmovdqu $ACC0, (%rsp) # clear top of stack
972 xor $r0, $r0
973 .byte 0x67
974 xor $r1, $r1
975 xor $r2, $r2
976 xor $r3, $r3
977
978 vmovdqu .Land_mask(%rip), $AND_MASK
979 mov \$9, $i
980 vmovdqu $ACC9, 32*9-128($rp) # $ACC9 is zero after vzeroall
981 jmp .Loop_mul_1024
982
983.align 32
984.Loop_mul_1024:
985 vpsrlq \$29, $ACC3, $ACC9 # correct $ACC3(*)
986 mov %rbx, %rax
987 imulq -128($ap), %rax
988 add $r0, %rax
989 mov %rbx, $r1
990 imulq 8-128($ap), $r1
991 add 8(%rsp), $r1
992
993 mov %rax, $r0
994 imull $n0, %eax
995 and \$0x1fffffff, %eax
996
997 mov %rbx, $r2
998 imulq 16-128($ap), $r2
999 add 16(%rsp), $r2
1000
1001 mov %rbx, $r3
1002 imulq 24-128($ap), $r3
1003 add 24(%rsp), $r3
1004 vpmuludq 32*1-128($ap),$Bi,$TEMP0
1005 vmovd %eax, $Yi
1006 vpaddq $TEMP0,$ACC1,$ACC1
1007 vpmuludq 32*2-128($ap),$Bi,$TEMP1
1008 vpbroadcastq $Yi, $Yi
1009 vpaddq $TEMP1,$ACC2,$ACC2
1010 vpmuludq 32*3-128($ap),$Bi,$TEMP2
1011 vpand $AND_MASK, $ACC3, $ACC3 # correct $ACC3
1012 vpaddq $TEMP2,$ACC3,$ACC3
1013 vpmuludq 32*4-128($ap),$Bi,$TEMP0
1014 vpaddq $TEMP0,$ACC4,$ACC4
1015 vpmuludq 32*5-128($ap),$Bi,$TEMP1
1016 vpaddq $TEMP1,$ACC5,$ACC5
1017 vpmuludq 32*6-128($ap),$Bi,$TEMP2
1018 vpaddq $TEMP2,$ACC6,$ACC6
1019 vpmuludq 32*7-128($ap),$Bi,$TEMP0
1020 vpermq \$0x93, $ACC9, $ACC9 # correct $ACC3
1021 vpaddq $TEMP0,$ACC7,$ACC7
1022 vpmuludq 32*8-128($ap),$Bi,$TEMP1
1023 vpbroadcastq 8($bp), $Bi
1024 vpaddq $TEMP1,$ACC8,$ACC8
1025
1026 mov %rax,%rdx
1027 imulq -128($np),%rax
1028 add %rax,$r0
1029 mov %rdx,%rax
1030 imulq 8-128($np),%rax
1031 add %rax,$r1
1032 mov %rdx,%rax
1033 imulq 16-128($np),%rax
1034 add %rax,$r2
1035 shr \$29, $r0
1036 imulq 24-128($np),%rdx
1037 add %rdx,$r3
1038 add $r0, $r1
1039
1040 vpmuludq 32*1-128($np),$Yi,$TEMP2
1041 vmovq $Bi, %rbx
1042 vpaddq $TEMP2,$ACC1,$ACC1
1043 vpmuludq 32*2-128($np),$Yi,$TEMP0
1044 vpaddq $TEMP0,$ACC2,$ACC2
1045 vpmuludq 32*3-128($np),$Yi,$TEMP1
1046 vpaddq $TEMP1,$ACC3,$ACC3
1047 vpmuludq 32*4-128($np),$Yi,$TEMP2
1048 vpaddq $TEMP2,$ACC4,$ACC4
1049 vpmuludq 32*5-128($np),$Yi,$TEMP0
1050 vpaddq $TEMP0,$ACC5,$ACC5
1051 vpmuludq 32*6-128($np),$Yi,$TEMP1
1052 vpaddq $TEMP1,$ACC6,$ACC6
1053 vpmuludq 32*7-128($np),$Yi,$TEMP2
1054	 vpblendd	\$3, $ZERO, $ACC9, $TEMP1	# correct $ACC3
1055	vpaddq	$TEMP2,$ACC7,$ACC7
1056	vpmuludq	32*8-128($np),$Yi,$TEMP0
1057	 vpaddq	$TEMP1, $ACC3, $ACC3		# correct $ACC3
1058	vpaddq	$TEMP0,$ACC8,$ACC8
1059
1060 mov %rbx, %rax
1061 imulq -128($ap),%rax
1062 add %rax,$r1
1063 vmovdqu -8+32*1-128($ap),$TEMP1
1064 mov %rbx, %rax
1065 imulq 8-128($ap),%rax
1066 add %rax,$r2
1067 vmovdqu -8+32*2-128($ap),$TEMP2
1068
1069 mov $r1, %rax
1070	 vpblendd	\$0xfc, $ZERO, $ACC9, $ACC9	# correct $ACC3
1071	imull	$n0, %eax
1072	 vpaddq	$ACC9,$ACC4,$ACC4		# correct $ACC3
1073	and	\$0x1fffffff, %eax
1074
1075 imulq 16-128($ap),%rbx
1076 add %rbx,$r3
1077 vpmuludq $Bi,$TEMP1,$TEMP1
1078 vmovd %eax, $Yi
1079 vmovdqu -8+32*3-128($ap),$TEMP0
1080 vpaddq $TEMP1,$ACC1,$ACC1
1081 vpmuludq $Bi,$TEMP2,$TEMP2
1082 vpbroadcastq $Yi, $Yi
1083 vmovdqu -8+32*4-128($ap),$TEMP1
1084 vpaddq $TEMP2,$ACC2,$ACC2
1085 vpmuludq $Bi,$TEMP0,$TEMP0
1086 vmovdqu -8+32*5-128($ap),$TEMP2
1087 vpaddq $TEMP0,$ACC3,$ACC3
1088 vpmuludq $Bi,$TEMP1,$TEMP1
1089 vmovdqu -8+32*6-128($ap),$TEMP0
1090 vpaddq $TEMP1,$ACC4,$ACC4
1091 vpmuludq $Bi,$TEMP2,$TEMP2
1092 vmovdqu -8+32*7-128($ap),$TEMP1
1093 vpaddq $TEMP2,$ACC5,$ACC5
1094 vpmuludq $Bi,$TEMP0,$TEMP0
1095 vmovdqu -8+32*8-128($ap),$TEMP2
1096 vpaddq $TEMP0,$ACC6,$ACC6
1097 vpmuludq $Bi,$TEMP1,$TEMP1
1098 vmovdqu -8+32*9-128($ap),$ACC9
1099 vpaddq $TEMP1,$ACC7,$ACC7
1100 vpmuludq $Bi,$TEMP2,$TEMP2
1101 vpaddq $TEMP2,$ACC8,$ACC8
1102 vpmuludq $Bi,$ACC9,$ACC9
1103 vpbroadcastq 16($bp), $Bi
1104
1105 mov %rax,%rdx
1106 imulq -128($np),%rax
1107 add %rax,$r1
1108 vmovdqu -8+32*1-128($np),$TEMP0
1109 mov %rdx,%rax
1110 imulq 8-128($np),%rax
1111 add %rax,$r2
1112 vmovdqu -8+32*2-128($np),$TEMP1
1113 shr \$29, $r1
1114 imulq 16-128($np),%rdx
1115 add %rdx,$r3
1116 add $r1, $r2
1117
1118 vpmuludq $Yi,$TEMP0,$TEMP0
1119 vmovq $Bi, %rbx
1120 vmovdqu -8+32*3-128($np),$TEMP2
1121 vpaddq $TEMP0,$ACC1,$ACC1
1122 vpmuludq $Yi,$TEMP1,$TEMP1
1123 vmovdqu -8+32*4-128($np),$TEMP0
1124 vpaddq $TEMP1,$ACC2,$ACC2
1125 vpmuludq $Yi,$TEMP2,$TEMP2
1126 vmovdqu -8+32*5-128($np),$TEMP1
1127 vpaddq $TEMP2,$ACC3,$ACC3
1128 vpmuludq $Yi,$TEMP0,$TEMP0
1129 vmovdqu -8+32*6-128($np),$TEMP2
1130 vpaddq $TEMP0,$ACC4,$ACC4
1131 vpmuludq $Yi,$TEMP1,$TEMP1
1132 vmovdqu -8+32*7-128($np),$TEMP0
1133 vpaddq $TEMP1,$ACC5,$ACC5
1134 vpmuludq $Yi,$TEMP2,$TEMP2
1135 vmovdqu -8+32*8-128($np),$TEMP1
1136 vpaddq $TEMP2,$ACC6,$ACC6
1137 vpmuludq $Yi,$TEMP0,$TEMP0
1138 vmovdqu -8+32*9-128($np),$TEMP2
1139 vpaddq $TEMP0,$ACC7,$ACC7
1140 vpmuludq $Yi,$TEMP1,$TEMP1
1141 vpaddq $TEMP1,$ACC8,$ACC8
1142 vpmuludq $Yi,$TEMP2,$TEMP2
1143 vpaddq $TEMP2,$ACC9,$ACC9
1144
1145 vmovdqu -16+32*1-128($ap),$TEMP0
1146 mov %rbx,%rax
1147 imulq -128($ap),%rax
1148 add $r2,%rax
1149
1150 vmovdqu -16+32*2-128($ap),$TEMP1
1151 mov %rax,$r2
1152 imull $n0, %eax
1153 and \$0x1fffffff, %eax
1154
1155 imulq 8-128($ap),%rbx
1156 add %rbx,$r3
1157 vpmuludq $Bi,$TEMP0,$TEMP0
1158 vmovd %eax, $Yi
1159 vmovdqu -16+32*3-128($ap),$TEMP2
1160 vpaddq $TEMP0,$ACC1,$ACC1
1161 vpmuludq $Bi,$TEMP1,$TEMP1
1162 vpbroadcastq $Yi, $Yi
1163 vmovdqu -16+32*4-128($ap),$TEMP0
1164 vpaddq $TEMP1,$ACC2,$ACC2
1165 vpmuludq $Bi,$TEMP2,$TEMP2
1166 vmovdqu -16+32*5-128($ap),$TEMP1
1167 vpaddq $TEMP2,$ACC3,$ACC3
1168 vpmuludq $Bi,$TEMP0,$TEMP0
1169 vmovdqu -16+32*6-128($ap),$TEMP2
1170 vpaddq $TEMP0,$ACC4,$ACC4
1171 vpmuludq $Bi,$TEMP1,$TEMP1
1172 vmovdqu -16+32*7-128($ap),$TEMP0
1173 vpaddq $TEMP1,$ACC5,$ACC5
1174 vpmuludq $Bi,$TEMP2,$TEMP2
1175 vmovdqu -16+32*8-128($ap),$TEMP1
1176 vpaddq $TEMP2,$ACC6,$ACC6
1177 vpmuludq $Bi,$TEMP0,$TEMP0
1178 vmovdqu -16+32*9-128($ap),$TEMP2
1179 vpaddq $TEMP0,$ACC7,$ACC7
1180 vpmuludq $Bi,$TEMP1,$TEMP1
1181 vpaddq $TEMP1,$ACC8,$ACC8
1182 vpmuludq $Bi,$TEMP2,$TEMP2
1183 vpbroadcastq 24($bp), $Bi
1184 vpaddq $TEMP2,$ACC9,$ACC9
1185
1186 vmovdqu -16+32*1-128($np),$TEMP0
1187 mov %rax,%rdx
1188 imulq -128($np),%rax
1189 add %rax,$r2
1190 vmovdqu -16+32*2-128($np),$TEMP1
1191 imulq 8-128($np),%rdx
1192 add %rdx,$r3
1193 shr \$29, $r2
1194
1195 vpmuludq $Yi,$TEMP0,$TEMP0
1196 vmovq $Bi, %rbx
1197 vmovdqu -16+32*3-128($np),$TEMP2
1198 vpaddq $TEMP0,$ACC1,$ACC1
1199 vpmuludq $Yi,$TEMP1,$TEMP1
1200 vmovdqu -16+32*4-128($np),$TEMP0
1201 vpaddq $TEMP1,$ACC2,$ACC2
1202 vpmuludq $Yi,$TEMP2,$TEMP2
1203 vmovdqu -16+32*5-128($np),$TEMP1
1204 vpaddq $TEMP2,$ACC3,$ACC3
1205 vpmuludq $Yi,$TEMP0,$TEMP0
1206 vmovdqu -16+32*6-128($np),$TEMP2
1207 vpaddq $TEMP0,$ACC4,$ACC4
1208 vpmuludq $Yi,$TEMP1,$TEMP1
1209 vmovdqu -16+32*7-128($np),$TEMP0
1210 vpaddq $TEMP1,$ACC5,$ACC5
1211 vpmuludq $Yi,$TEMP2,$TEMP2
1212 vmovdqu -16+32*8-128($np),$TEMP1
1213 vpaddq $TEMP2,$ACC6,$ACC6
1214 vpmuludq $Yi,$TEMP0,$TEMP0
1215 vmovdqu -16+32*9-128($np),$TEMP2
1216 vpaddq $TEMP0,$ACC7,$ACC7
1217 vpmuludq $Yi,$TEMP1,$TEMP1
1218 vmovdqu -24+32*1-128($ap),$TEMP0
1219 vpaddq $TEMP1,$ACC8,$ACC8
1220 vpmuludq $Yi,$TEMP2,$TEMP2
1221 vmovdqu -24+32*2-128($ap),$TEMP1
1222 vpaddq $TEMP2,$ACC9,$ACC9
1223
1224 add $r2, $r3
1225 imulq -128($ap),%rbx
1226 add %rbx,$r3
1227
1228 mov $r3, %rax
1229 imull $n0, %eax
1230 and \$0x1fffffff, %eax
1231
1232 vpmuludq $Bi,$TEMP0,$TEMP0
1233 vmovd %eax, $Yi
1234 vmovdqu -24+32*3-128($ap),$TEMP2
1235 vpaddq $TEMP0,$ACC1,$ACC1
1236 vpmuludq $Bi,$TEMP1,$TEMP1
1237 vpbroadcastq $Yi, $Yi
1238 vmovdqu -24+32*4-128($ap),$TEMP0
1239 vpaddq $TEMP1,$ACC2,$ACC2
1240 vpmuludq $Bi,$TEMP2,$TEMP2
1241 vmovdqu -24+32*5-128($ap),$TEMP1
1242 vpaddq $TEMP2,$ACC3,$ACC3
1243 vpmuludq $Bi,$TEMP0,$TEMP0
1244 vmovdqu -24+32*6-128($ap),$TEMP2
1245 vpaddq $TEMP0,$ACC4,$ACC4
1246 vpmuludq $Bi,$TEMP1,$TEMP1
1247 vmovdqu -24+32*7-128($ap),$TEMP0
1248 vpaddq $TEMP1,$ACC5,$ACC5
1249 vpmuludq $Bi,$TEMP2,$TEMP2
1250 vmovdqu -24+32*8-128($ap),$TEMP1
1251 vpaddq $TEMP2,$ACC6,$ACC6
1252 vpmuludq $Bi,$TEMP0,$TEMP0
1253 vmovdqu -24+32*9-128($ap),$TEMP2
1254 vpaddq $TEMP0,$ACC7,$ACC7
1255 vpmuludq $Bi,$TEMP1,$TEMP1
1256 vpaddq $TEMP1,$ACC8,$ACC8
1257 vpmuludq $Bi,$TEMP2,$TEMP2
1258 vpbroadcastq 32($bp), $Bi
1259 vpaddq $TEMP2,$ACC9,$ACC9
1260 add \$32, $bp # $bp++
1261
1262 vmovdqu -24+32*1-128($np),$TEMP0
1263 imulq -128($np),%rax
1264 add %rax,$r3
1265 shr \$29, $r3
1266
1267 vmovdqu -24+32*2-128($np),$TEMP1
1268 vpmuludq $Yi,$TEMP0,$TEMP0
1269 vmovq $Bi, %rbx
1270 vmovdqu -24+32*3-128($np),$TEMP2
1271 vpaddq $TEMP0,$ACC1,$ACC0 # $ACC0==$TEMP0
1272 vpmuludq $Yi,$TEMP1,$TEMP1
1273 vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
1274 vpaddq $TEMP1,$ACC2,$ACC1
1275 vmovdqu -24+32*4-128($np),$TEMP0
1276 vpmuludq $Yi,$TEMP2,$TEMP2
1277 vmovdqu -24+32*5-128($np),$TEMP1
1278 vpaddq $TEMP2,$ACC3,$ACC2
1279 vpmuludq $Yi,$TEMP0,$TEMP0
1280 vmovdqu -24+32*6-128($np),$TEMP2
1281 vpaddq $TEMP0,$ACC4,$ACC3
1282 vpmuludq $Yi,$TEMP1,$TEMP1
1283 vmovdqu -24+32*7-128($np),$TEMP0
1284 vpaddq $TEMP1,$ACC5,$ACC4
1285 vpmuludq $Yi,$TEMP2,$TEMP2
1286 vmovdqu -24+32*8-128($np),$TEMP1
1287 vpaddq $TEMP2,$ACC6,$ACC5
1288 vpmuludq $Yi,$TEMP0,$TEMP0
1289 vmovdqu -24+32*9-128($np),$TEMP2
1290 mov $r3, $r0
1291 vpaddq $TEMP0,$ACC7,$ACC6
1292 vpmuludq $Yi,$TEMP1,$TEMP1
1293 add (%rsp), $r0
1294 vpaddq $TEMP1,$ACC8,$ACC7
1295 vpmuludq $Yi,$TEMP2,$TEMP2
1296 vmovq $r3, $TEMP1
1297 vpaddq $TEMP2,$ACC9,$ACC8
1298
1299 dec $i
1300 jnz .Loop_mul_1024
1301___
1302
1303# (*) Original implementation was correcting ACC1-ACC3 for overflow
1304# after 7 loop runs, or after 28 iterations, or 56 additions.
1305# But as we underutilize resources, it's possible to correct in
1306# each iteration with marginal performance loss. But then, as
1307# we do it in each iteration, we can correct fewer digits, and
1308# avoid performance penalties completely.
1309
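# Rough headroom arithmetic behind that trade-off (illustrative only): each
# 29x29-bit product is below 2^58, so a 64-bit lane can absorb roughly
# 2^(64-58) = 64 such terms before overflowing; deferring the fix for
# 56 additions stayed just under that bound, whereas the per-iteration
# variant above only ever touches $ACC3 (and its spill-over into $ACC4).
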
1310$TEMP0 = $ACC9;
1311$TEMP3 = $Bi;
1312$TEMP4 = $Yi;
1313$code.=<<___;
1314	vpaddq	(%rsp), $TEMP1, $ACC0
1315
1316 vpsrlq \$29, $ACC0, $TEMP1
1317 vpand $AND_MASK, $ACC0, $ACC0
1318 vpsrlq \$29, $ACC1, $TEMP2
1319 vpand $AND_MASK, $ACC1, $ACC1
1320 vpsrlq \$29, $ACC2, $TEMP3
1321 vpermq \$0x93, $TEMP1, $TEMP1
1322 vpand $AND_MASK, $ACC2, $ACC2
1323 vpsrlq \$29, $ACC3, $TEMP4
1324 vpermq \$0x93, $TEMP2, $TEMP2
1325 vpand $AND_MASK, $ACC3, $ACC3
1326
1327 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1328 vpermq \$0x93, $TEMP3, $TEMP3
1329 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1330 vpermq \$0x93, $TEMP4, $TEMP4
1331 vpaddq $TEMP0, $ACC0, $ACC0
1332 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1333 vpaddq $TEMP1, $ACC1, $ACC1
1334 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1335 vpaddq $TEMP2, $ACC2, $ACC2
1336 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
1337 vpaddq $TEMP3, $ACC3, $ACC3
1338 vpaddq $TEMP4, $ACC4, $ACC4
1339
1340 vpsrlq \$29, $ACC0, $TEMP1
1341 vpand $AND_MASK, $ACC0, $ACC0
1342 vpsrlq \$29, $ACC1, $TEMP2
1343 vpand $AND_MASK, $ACC1, $ACC1
1344 vpsrlq \$29, $ACC2, $TEMP3
1345 vpermq \$0x93, $TEMP1, $TEMP1
1346 vpand $AND_MASK, $ACC2, $ACC2
1347 vpsrlq \$29, $ACC3, $TEMP4
1348 vpermq \$0x93, $TEMP2, $TEMP2
1349 vpand $AND_MASK, $ACC3, $ACC3
1350 vpermq \$0x93, $TEMP3, $TEMP3
1351
1352 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1353 vpermq \$0x93, $TEMP4, $TEMP4
1354 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1355 vpaddq $TEMP0, $ACC0, $ACC0
1356 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1357 vpaddq $TEMP1, $ACC1, $ACC1
1358 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1359 vpaddq $TEMP2, $ACC2, $ACC2
1360 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
1361 vpaddq $TEMP3, $ACC3, $ACC3
1362 vpaddq $TEMP4, $ACC4, $ACC4
1363
1364 vmovdqu $ACC0, 0-128($rp)
1365 vmovdqu $ACC1, 32-128($rp)
1366 vmovdqu $ACC2, 64-128($rp)
1367 vmovdqu $ACC3, 96-128($rp)
1368___
1369
1370$TEMP5=$ACC0;
1371$code.=<<___;
1372 vpsrlq \$29, $ACC4, $TEMP1
1373 vpand $AND_MASK, $ACC4, $ACC4
1374 vpsrlq \$29, $ACC5, $TEMP2
1375 vpand $AND_MASK, $ACC5, $ACC5
1376 vpsrlq \$29, $ACC6, $TEMP3
1377 vpermq \$0x93, $TEMP1, $TEMP1
1378 vpand $AND_MASK, $ACC6, $ACC6
1379 vpsrlq \$29, $ACC7, $TEMP4
1380 vpermq \$0x93, $TEMP2, $TEMP2
1381 vpand $AND_MASK, $ACC7, $ACC7
1382 vpsrlq \$29, $ACC8, $TEMP5
1383 vpermq \$0x93, $TEMP3, $TEMP3
1384 vpand $AND_MASK, $ACC8, $ACC8
1385 vpermq \$0x93, $TEMP4, $TEMP4
1386
1387 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1388 vpermq \$0x93, $TEMP5, $TEMP5
1389 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1390 vpaddq $TEMP0, $ACC4, $ACC4
1391 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1392 vpaddq $TEMP1, $ACC5, $ACC5
1393 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1394 vpaddq $TEMP2, $ACC6, $ACC6
1395 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
1396 vpaddq $TEMP3, $ACC7, $ACC7
1397 vpaddq $TEMP4, $ACC8, $ACC8
1398
1399 vpsrlq \$29, $ACC4, $TEMP1
1400 vpand $AND_MASK, $ACC4, $ACC4
1401 vpsrlq \$29, $ACC5, $TEMP2
1402 vpand $AND_MASK, $ACC5, $ACC5
1403 vpsrlq \$29, $ACC6, $TEMP3
1404 vpermq \$0x93, $TEMP1, $TEMP1
1405 vpand $AND_MASK, $ACC6, $ACC6
1406 vpsrlq \$29, $ACC7, $TEMP4
1407 vpermq \$0x93, $TEMP2, $TEMP2
1408 vpand $AND_MASK, $ACC7, $ACC7
1409 vpsrlq \$29, $ACC8, $TEMP5
1410 vpermq \$0x93, $TEMP3, $TEMP3
1411 vpand $AND_MASK, $ACC8, $ACC8
1412 vpermq \$0x93, $TEMP4, $TEMP4
1413
1414 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1415 vpermq \$0x93, $TEMP5, $TEMP5
1416 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1417 vpaddq $TEMP0, $ACC4, $ACC4
1418 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1419 vpaddq $TEMP1, $ACC5, $ACC5
1420 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1421 vpaddq $TEMP2, $ACC6, $ACC6
1422 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
1423 vpaddq $TEMP3, $ACC7, $ACC7
1424 vpaddq $TEMP4, $ACC8, $ACC8
1425
1426 vmovdqu $ACC4, 128-128($rp)
1427	vmovdqu	$ACC5, 160-128($rp)
1428	vmovdqu	$ACC6, 192-128($rp)
1429 vmovdqu $ACC7, 224-128($rp)
1430 vmovdqu $ACC8, 256-128($rp)
1431 vzeroupper
1432
1433 mov %rbp, %rax
1434.cfi_def_cfa_register	%rax
1435___
1436$code.=<<___ if ($win64);
1437.Lmul_1024_in_tail:
1438	movaps	-0xd8(%rax),%xmm6
1439 movaps -0xc8(%rax),%xmm7
1440 movaps -0xb8(%rax),%xmm8
1441 movaps -0xa8(%rax),%xmm9
1442 movaps -0x98(%rax),%xmm10
1443 movaps -0x88(%rax),%xmm11
1444 movaps -0x78(%rax),%xmm12
1445 movaps -0x68(%rax),%xmm13
1446 movaps -0x58(%rax),%xmm14
1447 movaps -0x48(%rax),%xmm15
1448___
1449$code.=<<___;
1450 mov -48(%rax),%r15
1451.cfi_restore	%r15
1452	mov	-40(%rax),%r14
1453.cfi_restore	%r14
1454	mov	-32(%rax),%r13
1455.cfi_restore	%r13
1456	mov	-24(%rax),%r12
1457.cfi_restore	%r12
1458	mov	-16(%rax),%rbp
1459.cfi_restore	%rbp
1460	mov	-8(%rax),%rbx
1461.cfi_restore	%rbx
1462	lea	(%rax),%rsp		# restore %rsp
1463.cfi_def_cfa_register	%rsp
1464.Lmul_1024_epilogue:
1465	ret
1466.cfi_endproc
1467.size	rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1468___
1469}
1470{
1471my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1472my @T = map("%r$_",(8..11));
1473
1474$code.=<<___;
1475.globl rsaz_1024_red2norm_avx2
1476.type rsaz_1024_red2norm_avx2,\@abi-omnipotent
1477.align 32
1478rsaz_1024_red2norm_avx2:
1479 sub \$-128,$inp # size optimization
1480 xor %rax,%rax
1481___
1482
1483for ($j=0,$i=0; $i<16; $i++) {
1484 my $k=0;
1485 while (29*$j<64*($i+1)) { # load data till boundary
1486 $code.=" mov `8*$j-128`($inp), @T[0]\n";
1487 $j++; $k++; push(@T,shift(@T));
1488 }
1489 $l=$k;
1490 while ($k>1) { # shift loaded data but last value
1491 $code.=" shl \$`29*($j-$k)`,@T[-$k]\n";
1492 $k--;
1493 }
1494 $code.=<<___; # shift last value
1495 mov @T[-1], @T[0]
1496 shl \$`29*($j-1)`, @T[-1]
1497 shr \$`-29*($j-1)`, @T[0]
1498___
1499 while ($l) { # accumulate all values
1500 $code.=" add @T[-$l], %rax\n";
1501 $l--;
1502 }
1503 $code.=<<___;
1504 adc \$0, @T[0] # consume eventual carry
1505 mov %rax, 8*$i($out)
1506 mov @T[0], %rax
1507___
1508 push(@T,shift(@T));
1509}
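# Shape of the conversion emitted above (comment only): output word i gathers
# every 29-bit digit that overlaps bits [64*i, 64*i+63], each shifted into
# place; the digit straddling the word boundary is split with a shl/shr pair,
# and the adc folds any carry from the additions into its upper part.
# e.g. word 0 = d0 + (d1<<29) + (d2<<58), with d2>>6 carried on to word 1.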
1510$code.=<<___;
1511 ret
1512.size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1513
1514.globl rsaz_1024_norm2red_avx2
1515.type rsaz_1024_norm2red_avx2,\@abi-omnipotent
1516.align 32
1517rsaz_1024_norm2red_avx2:
1518 sub \$-128,$out # size optimization
1519 mov ($inp),@T[0]
1520 mov \$0x1fffffff,%eax
1521___
1522for ($j=0,$i=0; $i<16; $i++) {
1523 $code.=" mov `8*($i+1)`($inp),@T[1]\n" if ($i<15);
1524 $code.=" xor @T[1],@T[1]\n" if ($i==15);
1525 my $k=1;
1526 while (29*($j+1)<64*($i+1)) {
1527 $code.=<<___;
1528 mov @T[0],@T[-$k]
1529 shr \$`29*$j`,@T[-$k]
1530 and %rax,@T[-$k] # &0x1fffffff
1531 mov @T[-$k],`8*$j-128`($out)
1532___
1533 $j++; $k++;
1534 }
1535 $code.=<<___;
1536 shrd \$`29*$j`,@T[1],@T[0]
1537 and %rax,@T[0]
1538 mov @T[0],`8*$j-128`($out)
1539___
1540 $j++;
1541 push(@T,shift(@T));
1542}
1543$code.=<<___;
1544 mov @T[0],`8*$j-128`($out) # zero
1545 mov @T[0],`8*($j+1)-128`($out)
1546 mov @T[0],`8*($j+2)-128`($out)
1547 mov @T[0],`8*($j+3)-128`($out)
1548 ret
1549.size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1550___
1551}
1552{
1553my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1554
1555$code.=<<___;
1556.globl rsaz_1024_scatter5_avx2
1557.type rsaz_1024_scatter5_avx2,\@abi-omnipotent
1558.align 32
1559rsaz_1024_scatter5_avx2:
1560 vzeroupper
1561 vmovdqu .Lscatter_permd(%rip),%ymm5
1562 shl \$4,$power
1563 lea ($out,$power),$out
1564 mov \$9,%eax
1565 jmp .Loop_scatter_1024
1566
1567.align 32
1568.Loop_scatter_1024:
1569 vmovdqu ($inp),%ymm0
1570 lea 32($inp),$inp
1571 vpermd %ymm0,%ymm5,%ymm0
1572 vmovdqu %xmm0,($out)
1573 lea 16*32($out),$out
1574 dec %eax
1575 jnz .Loop_scatter_1024
1576
1577 vzeroupper
1578 ret
1579.size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1580
1581.globl rsaz_1024_gather5_avx2
1582.type rsaz_1024_gather5_avx2,\@abi-omnipotent
1583.align 32
1584rsaz_1024_gather5_avx2:
1585.cfi_startproc
1586	vzeroupper
1587	mov	%rsp,%r11
1588.cfi_def_cfa_register	%r11
1589___
1590$code.=<<___ if ($win64);
1591 lea -0x88(%rsp),%rax
1592.LSEH_begin_rsaz_1024_gather5:
1593 # I can't trust assembler to use specific encoding:-(
1594	.byte	0x48,0x8d,0x60,0xe0		# lea	-0x20(%rax),%rsp
1595 .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6,-0x20(%rax)
1596 .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7,-0x10(%rax)
1597 .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8,0(%rax)
1598 .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9,0x10(%rax)
1599 .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10,0x20(%rax)
1600 .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11,0x30(%rax)
1601 .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12,0x40(%rax)
1602 .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13,0x50(%rax)
1603 .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14,0x60(%rax)
1604 .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15,0x70(%rax)
1605___
1606$code.=<<___;
1607	lea	-0x100(%rsp),%rsp
1608 and \$-32, %rsp
1609 lea .Linc(%rip), %r10
1610 lea -128(%rsp),%rax # control u-op density
1611
1612	vmovd	$power, %xmm4
1613 vmovdqa (%r10),%ymm0
1614 vmovdqa 32(%r10),%ymm1
1615 vmovdqa 64(%r10),%ymm5
1616 vpbroadcastd %xmm4,%ymm4
1617
1618	vpaddd	%ymm5, %ymm0, %ymm2
1619 vpcmpeqd %ymm4, %ymm0, %ymm0
1620 vpaddd %ymm5, %ymm1, %ymm3
1621 vpcmpeqd %ymm4, %ymm1, %ymm1
1622 vmovdqa %ymm0, 32*0+128(%rax)
1623 vpaddd %ymm5, %ymm2, %ymm0
1624 vpcmpeqd %ymm4, %ymm2, %ymm2
1625 vmovdqa %ymm1, 32*1+128(%rax)
1626 vpaddd %ymm5, %ymm3, %ymm1
1627 vpcmpeqd %ymm4, %ymm3, %ymm3
1628 vmovdqa %ymm2, 32*2+128(%rax)
1629 vpaddd %ymm5, %ymm0, %ymm2
1630 vpcmpeqd %ymm4, %ymm0, %ymm0
1631 vmovdqa %ymm3, 32*3+128(%rax)
1632 vpaddd %ymm5, %ymm1, %ymm3
1633 vpcmpeqd %ymm4, %ymm1, %ymm1
1634 vmovdqa %ymm0, 32*4+128(%rax)
1635 vpaddd %ymm5, %ymm2, %ymm8
1636 vpcmpeqd %ymm4, %ymm2, %ymm2
1637 vmovdqa %ymm1, 32*5+128(%rax)
1638 vpaddd %ymm5, %ymm3, %ymm9
1639 vpcmpeqd %ymm4, %ymm3, %ymm3
1640 vmovdqa %ymm2, 32*6+128(%rax)
1641 vpaddd %ymm5, %ymm8, %ymm10
1642 vpcmpeqd %ymm4, %ymm8, %ymm8
1643 vmovdqa %ymm3, 32*7+128(%rax)
1644 vpaddd %ymm5, %ymm9, %ymm11
1645 vpcmpeqd %ymm4, %ymm9, %ymm9
1646 vpaddd %ymm5, %ymm10, %ymm12
1647 vpcmpeqd %ymm4, %ymm10, %ymm10
1648 vpaddd %ymm5, %ymm11, %ymm13
1649 vpcmpeqd %ymm4, %ymm11, %ymm11
1650 vpaddd %ymm5, %ymm12, %ymm14
1651 vpcmpeqd %ymm4, %ymm12, %ymm12
1652 vpaddd %ymm5, %ymm13, %ymm15
1653 vpcmpeqd %ymm4, %ymm13, %ymm13
1654 vpcmpeqd %ymm4, %ymm14, %ymm14
1655 vpcmpeqd %ymm4, %ymm15, %ymm15
1656
1657	vmovdqa	-32(%r10),%ymm7		# .Lgather_permd
1658 lea 128($inp), $inp
1659 mov \$9,$power
1660
1661.Loop_gather_1024:
1662	vmovdqa	32*0-128($inp), %ymm0
1663 vmovdqa 32*1-128($inp), %ymm1
1664 vmovdqa 32*2-128($inp), %ymm2
1665 vmovdqa 32*3-128($inp), %ymm3
1666 vpand 32*0+128(%rax), %ymm0, %ymm0
1667 vpand 32*1+128(%rax), %ymm1, %ymm1
1668 vpand 32*2+128(%rax), %ymm2, %ymm2
1669 vpor %ymm0, %ymm1, %ymm4
1670 vpand 32*3+128(%rax), %ymm3, %ymm3
1671 vmovdqa 32*4-128($inp), %ymm0
1672 vmovdqa 32*5-128($inp), %ymm1
1673 vpor %ymm2, %ymm3, %ymm5
1674 vmovdqa 32*6-128($inp), %ymm2
1675 vmovdqa 32*7-128($inp), %ymm3
1676 vpand 32*4+128(%rax), %ymm0, %ymm0
1677 vpand 32*5+128(%rax), %ymm1, %ymm1
1678 vpand 32*6+128(%rax), %ymm2, %ymm2
1679 vpor %ymm0, %ymm4, %ymm4
1680 vpand 32*7+128(%rax), %ymm3, %ymm3
1681 vpand 32*8-128($inp), %ymm8, %ymm0
1682 vpor %ymm1, %ymm5, %ymm5
1683 vpand 32*9-128($inp), %ymm9, %ymm1
1684 vpor %ymm2, %ymm4, %ymm4
1685 vpand 32*10-128($inp),%ymm10, %ymm2
1686 vpor %ymm3, %ymm5, %ymm5
1687 vpand 32*11-128($inp),%ymm11, %ymm3
1688 vpor %ymm0, %ymm4, %ymm4
1689 vpand 32*12-128($inp),%ymm12, %ymm0
1690 vpor %ymm1, %ymm5, %ymm5
1691 vpand 32*13-128($inp),%ymm13, %ymm1
1692 vpor %ymm2, %ymm4, %ymm4
1693 vpand 32*14-128($inp),%ymm14, %ymm2
1694 vpor %ymm3, %ymm5, %ymm5
1695 vpand 32*15-128($inp),%ymm15, %ymm3
1696 lea 32*16($inp), $inp
1697 vpor %ymm0, %ymm4, %ymm4
1698 vpor %ymm1, %ymm5, %ymm5
1699 vpor %ymm2, %ymm4, %ymm4
1700 vpor %ymm3, %ymm5, %ymm5
1701
1702 vpor %ymm5, %ymm4, %ymm4
1703 vextracti128 \$1, %ymm4, %xmm5 # upper half is cleared
1704 vpor %xmm4, %xmm5, %xmm5
1705 vpermd %ymm5,%ymm7,%ymm5
1706 vmovdqu %ymm5,($out)
1707	lea	32($out),$out
1708	dec	$power
1709	jnz	.Loop_gather_1024
1710
1711 vpxor %ymm0,%ymm0,%ymm0
1712 vmovdqu %ymm0,($out)
1713 vzeroupper
1714___
1715$code.=<<___ if ($win64);
1716	movaps	-0xa8(%r11),%xmm6
1717 movaps -0x98(%r11),%xmm7
1718 movaps -0x88(%r11),%xmm8
1719 movaps -0x78(%r11),%xmm9
1720 movaps -0x68(%r11),%xmm10
1721 movaps -0x58(%r11),%xmm11
1722 movaps -0x48(%r11),%xmm12
1723 movaps -0x38(%r11),%xmm13
1724 movaps -0x28(%r11),%xmm14
1725 movaps -0x18(%r11),%xmm15
1726___
1727$code.=<<___;
1728	lea	(%r11),%rsp
1729.cfi_def_cfa_register	%rsp
1730	ret
1731.cfi_endproc
1732.LSEH_end_rsaz_1024_gather5:
Adam Langleyd9e397b2015-01-22 14:27:53 -08001733.size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1734___
1735}
1736
1737$code.=<<___;
1738.extern OPENSSL_ia32cap_P
1739.globl rsaz_avx2_eligible
1740.type rsaz_avx2_eligible,\@abi-omnipotent
1741.align 32
1742rsaz_avx2_eligible:
Robert Sloan8ff03552017-06-14 12:40:58 -07001743 leaq OPENSSL_ia32cap_P(%rip),%rax
1744 mov 8(%rax),%eax
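	# %eax now holds the third 32-bit word of OPENSSL_ia32cap_P, i.e. the
	# CPUID.(EAX=7,ECX=0):EBX extended feature flags.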
___
$code.=<<___ if ($addx);
	mov \$`1<<8|1<<19`,%ecx
	mov \$0,%edx
	and %eax,%ecx
	cmp \$`1<<8|1<<19`,%ecx # check for BMI2+AD*X
	cmove %edx,%eax
___
$code.=<<___;
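	# Bit 5 of the extended feature flags is AVX2; return it as 0 or 1.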
	and \$`1<<5`,%eax
	shr \$5,%eax
	ret
.size rsaz_avx2_eligible,.-rsaz_avx2_eligible

.align 64
.Land_mask:
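	# 2^29-1: the redundant representation keeps 29 bits per 64-bit limb.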
	.quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
.Lscatter_permd:
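	# Selects dwords 0,2,4,6: packs the low halves of four qwords into
	# 16 bytes when an entry is scattered into the table.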
	.long 0,2,4,6,7,7,7,7
.Lgather_permd:
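	# Interleaves dwords 0-3 with dword 7 (zero at the point of use),
	# expanding four packed dwords back into zero-extended qwords.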
	.long 0,7,1,7,2,7,3,7
.Linc:
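	# Lane indices for table entries 0-3 plus the constant +4 step that the
	# gather code uses to derive the indices of the remaining entries.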
	.long 0,0,0,0, 1,1,1,1
	.long 2,2,2,2, 3,3,3,3
	.long 4,4,4,4, 4,4,4,4
.align 64
___

if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___
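# Win64 exception handler shared by rsaz_1024_sqr_avx2 and rsaz_1024_mul_avx2:
# it locates the function's stack frame and copies the saved non-volatile
# GPRs and xmm6-xmm15 back into the unwind CONTEXT record.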
.extern __imp_RtlVirtualUnwind
.type rsaz_se_handler,\@abi-omnipotent
.align 16
rsaz_se_handler:
	push %rsi
	push %rdi
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
	pushfq
	sub \$64,%rsp

	mov 120($context),%rax # pull context->Rax
	mov 248($context),%rbx # pull context->Rip

	mov 8($disp),%rsi # disp->ImageBase
	mov 56($disp),%r11 # disp->HandlerData

	mov 0(%r11),%r10d # HandlerData[0]
	lea (%rsi,%r10),%r10 # prologue label
	cmp %r10,%rbx # context->Rip<prologue label
	jb .Lcommon_seh_tail

	mov 4(%r11),%r10d # HandlerData[1]
	lea (%rsi,%r10),%r10 # epilogue label
	cmp %r10,%rbx # context->Rip>=epilogue label
	jae .Lcommon_seh_tail

	mov 160($context),%rbp # pull context->Rbp

	mov 8(%r11),%r10d # HandlerData[2]
	lea (%rsi,%r10),%r10 # "in tail" label
	cmp %r10,%rbx # context->Rip>="in tail" label
	cmovc %rbp,%rax

	mov -48(%rax),%r15
	mov -40(%rax),%r14
	mov -32(%rax),%r13
	mov -24(%rax),%r12
	mov -16(%rax),%rbp
	mov -8(%rax),%rbx
	mov %r15,240($context)
	mov %r14,232($context)
	mov %r13,224($context)
	mov %r12,216($context)
	mov %rbp,160($context)
	mov %rbx,144($context)

	lea -0xd8(%rax),%rsi # %xmm save area
	lea 512($context),%rdi # & context.Xmm6
	mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
	.long 0xa548f3fc # cld; rep movsq

.Lcommon_seh_tail:
	mov 8(%rax),%rdi
	mov 16(%rax),%rsi
	mov %rax,152($context) # restore context->Rsp
	mov %rsi,168($context) # restore context->Rsi
	mov %rdi,176($context) # restore context->Rdi

	mov 40($disp),%rdi # disp->ContextRecord
	mov $context,%rsi # context
	mov \$154,%ecx # sizeof(CONTEXT)
	.long 0xa548f3fc # cld; rep movsq

	mov $disp,%rsi
	xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
	mov 8(%rsi),%rdx # arg2, disp->ImageBase
	mov 0(%rsi),%r8 # arg3, disp->ControlPc
	mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
	mov 40(%rsi),%r10 # disp->ContextRecord
	lea 56(%rsi),%r11 # &disp->HandlerData
	lea 24(%rsi),%r12 # &disp->EstablisherFrame
	mov %r10,32(%rsp) # arg5
	mov %r11,40(%rsp) # arg6
	mov %r12,48(%rsp) # arg7
	mov %rcx,56(%rsp) # arg8, (NULL)
	call *__imp_RtlVirtualUnwind(%rip)

	mov \$1,%eax # ExceptionContinueSearch
	add \$64,%rsp
	popfq
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbp
	pop %rbx
	pop %rdi
	pop %rsi
	ret
.size rsaz_se_handler,.-rsaz_se_handler

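# .pdata maps each function's code range to its unwind information below;
# rsaz_1024_sqr_avx2 and rsaz_1024_mul_avx2 point at rsaz_se_handler, while
# rsaz_1024_gather5 is described purely by unwind codes.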
.section .pdata
.align 4
	.rva .LSEH_begin_rsaz_1024_sqr_avx2
	.rva .LSEH_end_rsaz_1024_sqr_avx2
	.rva .LSEH_info_rsaz_1024_sqr_avx2

	.rva .LSEH_begin_rsaz_1024_mul_avx2
	.rva .LSEH_end_rsaz_1024_mul_avx2
	.rva .LSEH_info_rsaz_1024_mul_avx2

	.rva .LSEH_begin_rsaz_1024_gather5
	.rva .LSEH_end_rsaz_1024_gather5
	.rva .LSEH_info_rsaz_1024_gather5
.section .xdata
.align 8
.LSEH_info_rsaz_1024_sqr_avx2:
	.byte 9,0,0,0
	.rva rsaz_se_handler
	.rva .Lsqr_1024_body,.Lsqr_1024_epilogue,.Lsqr_1024_in_tail
	.long 0
.LSEH_info_rsaz_1024_mul_avx2:
	.byte 9,0,0,0
	.rva rsaz_se_handler
	.rva .Lmul_1024_body,.Lmul_1024_epilogue,.Lmul_1024_in_tail
	.long 0
.LSEH_info_rsaz_1024_gather5:
	.byte 0x01,0x36,0x17,0x0b
	.byte 0x36,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
	.byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
	.byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
	.byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
	.byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
	.byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
	.byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
	.byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
	.byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
	.byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
	.byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8
	.byte 0x00,0xb3,0x00,0x00 # set_frame r11
___
}

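# Post-process the generated code: evaluate the constant expressions written
# between backquotes, reduce shift counts modulo 64, and rewrite selected
# %ymm operands to their %xmm forms.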
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;

	s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge or

	s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
	s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
	print $_,"\n";
}

}}} else {{{
print <<___; # assembler is too old
.text

.globl rsaz_avx2_eligible
.type rsaz_avx2_eligible,\@abi-omnipotent
rsaz_avx2_eligible:
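	# This build has no AVX2 code, so report "not eligible"; the entry
	# points below should never be reached and trap with ud2 if they are.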
	xor %eax,%eax
	ret
.size rsaz_avx2_eligible,.-rsaz_avx2_eligible

.globl rsaz_1024_sqr_avx2
.globl rsaz_1024_mul_avx2
.globl rsaz_1024_norm2red_avx2
.globl rsaz_1024_red2norm_avx2
.globl rsaz_1024_scatter5_avx2
.globl rsaz_1024_gather5_avx2
.type rsaz_1024_sqr_avx2,\@abi-omnipotent
rsaz_1024_sqr_avx2:
rsaz_1024_mul_avx2:
rsaz_1024_norm2red_avx2:
rsaz_1024_red2norm_avx2:
rsaz_1024_scatter5_avx2:
rsaz_1024_gather5_avx2:
	.byte 0x0f,0x0b # ud2
	ret
.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
___
}}}

close STDOUT;