blob: bf68aadd20f2a45db57bfb6e20b3f336c77ae82b [file] [log] [blame]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# August 2011.
11#
12# Companion to x86_64-mont.pl that optimizes cache-timing attack
13# countermeasures. The subroutines are produced by replacing bp[i]
14# references in their x86_64-mont.pl counterparts with cache-neutral
15# references to powers table computed in BN_mod_exp_mont_consttime.
16# In addition subroutine that scatters elements of the powers table
17# is implemented, so that scatter-/gathering can be tuned without
18# bn_exp.c modifications.
19
20# August 2013.
21#
22# Add MULX/AD*X code paths and additional interfaces to optimize for
23# branch prediction unit. For input lengths that are multiples of 8
24# the np argument is not just modulus value, but one interleaved
25# with 0. This is to optimize post-condition...
26
27$flavour = shift;
28$output = shift;
29if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
30
31$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
32
33$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
34( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
35( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
36die "can't locate x86_64-xlate.pl";
37
David Benjaminc895d6b2016-08-11 13:26:41 -040038open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
Adam Langleyd9e397b2015-01-22 14:27:53 -080039*STDOUT=*OUT;
40
Kenny Roote99801b2015-11-06 15:31:15 -080041# In upstream, this is controlled by shelling out to the compiler to check
42# versions, but BoringSSL is intended to be used with pre-generated perlasm
43# output, so this isn't useful anyway.
44#
45# TODO(davidben): Enable this after testing. $addx goes up to 1.
46$addx = 0;
Adam Langleyd9e397b2015-01-22 14:27:53 -080047
48# int bn_mul_mont_gather5(
49$rp="%rdi"; # BN_ULONG *rp,
50$ap="%rsi"; # const BN_ULONG *ap,
51$bp="%rdx"; # const BN_ULONG *bp,
52$np="%rcx"; # const BN_ULONG *np,
53$n0="%r8"; # const BN_ULONG *n0,
54$num="%r9"; # int num,
55 # int idx); # 0 to 2^5-1, "index" in $bp holding
56 # pre-computed powers of a', interlaced
57 # in such manner that b[0] is $bp[idx],
58 # b[1] is [2^5+idx], etc.
59$lo0="%r10";
60$hi0="%r11";
61$hi1="%r13";
62$i="%r14";
63$j="%r15";
64$m0="%rbx";
65$m1="%rbp";
66
67$code=<<___;
68.text
69
70.extern OPENSSL_ia32cap_P
71
72.globl bn_mul_mont_gather5
73.type bn_mul_mont_gather5,\@function,6
74.align 64
75bn_mul_mont_gather5:
Robert Sloana94fe052017-02-21 08:49:28 -080076.cfi_startproc
77 mov ${num}d,${num}d
78 mov %rsp,%rax
79.cfi_def_cfa_register %rax
Adam Langleyd9e397b2015-01-22 14:27:53 -080080 test \$7,${num}d
81 jnz .Lmul_enter
82___
83$code.=<<___ if ($addx);
84 mov OPENSSL_ia32cap_P+8(%rip),%r11d
85___
86$code.=<<___;
87 jmp .Lmul4x_enter
88
89.align 16
90.Lmul_enter:
David Benjamin4969cc92016-04-22 15:02:23 -040091 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
Adam Langleyd9e397b2015-01-22 14:27:53 -080092 push %rbx
Robert Sloana94fe052017-02-21 08:49:28 -080093.cfi_push %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -080094 push %rbp
Robert Sloana94fe052017-02-21 08:49:28 -080095.cfi_push %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -080096 push %r12
Robert Sloana94fe052017-02-21 08:49:28 -080097.cfi_push %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -080098 push %r13
Robert Sloana94fe052017-02-21 08:49:28 -080099.cfi_push %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -0800100 push %r14
Robert Sloana94fe052017-02-21 08:49:28 -0800101.cfi_push %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -0800102 push %r15
Robert Sloana94fe052017-02-21 08:49:28 -0800103.cfi_push %r15
David Benjamin4969cc92016-04-22 15:02:23 -0400104
Robert Sloana94fe052017-02-21 08:49:28 -0800105 neg $num
106 mov %rsp,%r11
107 lea -280(%rsp,$num,8),%r10 # future alloca(8*(num+2)+256+8)
108 neg $num # restore $num
109 and \$-1024,%r10 # minimize TLB usage
Adam Langleyd9e397b2015-01-22 14:27:53 -0800110
Robert Sloana94fe052017-02-21 08:49:28 -0800111 # An OS-agnostic version of __chkstk.
112 #
113 # Some OSes (Windows) insist on stack being "wired" to
114 # physical memory in strictly sequential manner, i.e. if stack
115 # allocation spans two pages, then reference to farmost one can
116 # be punishable by SEGV. But page walking can do good even on
117 # other OSes, because it guarantees that villain thread hits
118 # the guard page before it can make damage to innocent one...
119 sub %r10,%r11
120 and \$-4096,%r11
121 lea (%r10,%r11),%rsp
122 mov (%rsp),%r11
123 cmp %r10,%rsp
124 ja .Lmul_page_walk
125 jmp .Lmul_page_walk_done
126
127.Lmul_page_walk:
128 lea -4096(%rsp),%rsp
129 mov (%rsp),%r11
130 cmp %r10,%rsp
131 ja .Lmul_page_walk
132.Lmul_page_walk_done:
133
134 lea .Linc(%rip),%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -0800135 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
Robert Sloana94fe052017-02-21 08:49:28 -0800136.cfi_cfa_expression %rsp+8,$num,8,mul,plus,deref,+8
Adam Langleyd9e397b2015-01-22 14:27:53 -0800137.Lmul_body:
Robert Sloana94fe052017-02-21 08:49:28 -0800138
David Benjamin4969cc92016-04-22 15:02:23 -0400139 lea 128($bp),%r12 # reassign $bp (+size optimization)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800140___
141 $bp="%r12";
142 $STRIDE=2**5*8; # 5 is "window size"
143 $N=$STRIDE/4; # should match cache line size
144$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -0400145 movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
146 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
147 lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
148 and \$-16,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -0800149
David Benjamin4969cc92016-04-22 15:02:23 -0400150 pshufd \$0,%xmm5,%xmm5 # broadcast index
151 movdqa %xmm1,%xmm4
152 movdqa %xmm1,%xmm2
153___
154########################################################################
155# calculate mask by comparing 0..31 to index and save result to stack
156#
157$code.=<<___;
158 paddd %xmm0,%xmm1
159 pcmpeqd %xmm5,%xmm0 # compare to 1,0
160 .byte 0x67
161 movdqa %xmm4,%xmm3
162___
163for($k=0;$k<$STRIDE/16-4;$k+=4) {
164$code.=<<___;
165 paddd %xmm1,%xmm2
166 pcmpeqd %xmm5,%xmm1 # compare to 3,2
167 movdqa %xmm0,`16*($k+0)+112`(%r10)
168 movdqa %xmm4,%xmm0
169
170 paddd %xmm2,%xmm3
171 pcmpeqd %xmm5,%xmm2 # compare to 5,4
172 movdqa %xmm1,`16*($k+1)+112`(%r10)
173 movdqa %xmm4,%xmm1
174
175 paddd %xmm3,%xmm0
176 pcmpeqd %xmm5,%xmm3 # compare to 7,6
177 movdqa %xmm2,`16*($k+2)+112`(%r10)
178 movdqa %xmm4,%xmm2
179
180 paddd %xmm0,%xmm1
181 pcmpeqd %xmm5,%xmm0
182 movdqa %xmm3,`16*($k+3)+112`(%r10)
183 movdqa %xmm4,%xmm3
184___
185}
186$code.=<<___; # last iteration can be optimized
187 paddd %xmm1,%xmm2
188 pcmpeqd %xmm5,%xmm1
189 movdqa %xmm0,`16*($k+0)+112`(%r10)
190
191 paddd %xmm2,%xmm3
192 .byte 0x67
193 pcmpeqd %xmm5,%xmm2
194 movdqa %xmm1,`16*($k+1)+112`(%r10)
195
196 pcmpeqd %xmm5,%xmm3
197 movdqa %xmm2,`16*($k+2)+112`(%r10)
198 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
199
200 pand `16*($k+1)-128`($bp),%xmm1
201 pand `16*($k+2)-128`($bp),%xmm2
202 movdqa %xmm3,`16*($k+3)+112`(%r10)
203 pand `16*($k+3)-128`($bp),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -0800204 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -0400205 por %xmm3,%xmm1
206___
207for($k=0;$k<$STRIDE/16-4;$k+=4) {
208$code.=<<___;
209 movdqa `16*($k+0)-128`($bp),%xmm4
210 movdqa `16*($k+1)-128`($bp),%xmm5
211 movdqa `16*($k+2)-128`($bp),%xmm2
212 pand `16*($k+0)+112`(%r10),%xmm4
213 movdqa `16*($k+3)-128`($bp),%xmm3
214 pand `16*($k+1)+112`(%r10),%xmm5
215 por %xmm4,%xmm0
216 pand `16*($k+2)+112`(%r10),%xmm2
217 por %xmm5,%xmm1
218 pand `16*($k+3)+112`(%r10),%xmm3
219 por %xmm2,%xmm0
220 por %xmm3,%xmm1
221___
222}
223$code.=<<___;
224 por %xmm1,%xmm0
225 pshufd \$0x4e,%xmm0,%xmm1
226 por %xmm1,%xmm0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800227 lea $STRIDE($bp),$bp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800228 movq %xmm0,$m0 # m0=bp[0]
229
230 mov ($n0),$n0 # pull n0[0] value
231 mov ($ap),%rax
232
233 xor $i,$i # i=0
234 xor $j,$j # j=0
235
Adam Langleyd9e397b2015-01-22 14:27:53 -0800236 mov $n0,$m1
237 mulq $m0 # ap[0]*bp[0]
238 mov %rax,$lo0
239 mov ($np),%rax
240
Adam Langleyd9e397b2015-01-22 14:27:53 -0800241 imulq $lo0,$m1 # "tp[0]"*n0
242 mov %rdx,$hi0
243
Adam Langleyd9e397b2015-01-22 14:27:53 -0800244 mulq $m1 # np[0]*m1
245 add %rax,$lo0 # discarded
246 mov 8($ap),%rax
247 adc \$0,%rdx
248 mov %rdx,$hi1
249
250 lea 1($j),$j # j++
251 jmp .L1st_enter
252
253.align 16
254.L1st:
255 add %rax,$hi1
256 mov ($ap,$j,8),%rax
257 adc \$0,%rdx
258 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
259 mov $lo0,$hi0
260 adc \$0,%rdx
261 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
262 mov %rdx,$hi1
263
264.L1st_enter:
265 mulq $m0 # ap[j]*bp[0]
266 add %rax,$hi0
267 mov ($np,$j,8),%rax
268 adc \$0,%rdx
269 lea 1($j),$j # j++
270 mov %rdx,$lo0
271
272 mulq $m1 # np[j]*m1
273 cmp $num,$j
David Benjamin4969cc92016-04-22 15:02:23 -0400274 jne .L1st # note that upon exit $j==$num, so
275 # they can be used interchangeably
Adam Langleyd9e397b2015-01-22 14:27:53 -0800276
277 add %rax,$hi1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800278 adc \$0,%rdx
279 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
280 adc \$0,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -0400281 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800282 mov %rdx,$hi1
283 mov $lo0,$hi0
284
285 xor %rdx,%rdx
286 add $hi0,$hi1
287 adc \$0,%rdx
288 mov $hi1,-8(%rsp,$num,8)
289 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
290
291 lea 1($i),$i # i++
292 jmp .Louter
293.align 16
294.Louter:
David Benjamin4969cc92016-04-22 15:02:23 -0400295 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
296 and \$-16,%rdx
297 pxor %xmm4,%xmm4
298 pxor %xmm5,%xmm5
299___
300for($k=0;$k<$STRIDE/16;$k+=4) {
301$code.=<<___;
302 movdqa `16*($k+0)-128`($bp),%xmm0
303 movdqa `16*($k+1)-128`($bp),%xmm1
304 movdqa `16*($k+2)-128`($bp),%xmm2
305 movdqa `16*($k+3)-128`($bp),%xmm3
306 pand `16*($k+0)-128`(%rdx),%xmm0
307 pand `16*($k+1)-128`(%rdx),%xmm1
308 por %xmm0,%xmm4
309 pand `16*($k+2)-128`(%rdx),%xmm2
310 por %xmm1,%xmm5
311 pand `16*($k+3)-128`(%rdx),%xmm3
312 por %xmm2,%xmm4
313 por %xmm3,%xmm5
314___
315}
316$code.=<<___;
317 por %xmm5,%xmm4
318 pshufd \$0x4e,%xmm4,%xmm0
319 por %xmm4,%xmm0
320 lea $STRIDE($bp),$bp
321
322 mov ($ap),%rax # ap[0]
323 movq %xmm0,$m0 # m0=bp[i]
324
Adam Langleyd9e397b2015-01-22 14:27:53 -0800325 xor $j,$j # j=0
326 mov $n0,$m1
327 mov (%rsp),$lo0
328
Adam Langleyd9e397b2015-01-22 14:27:53 -0800329 mulq $m0 # ap[0]*bp[i]
330 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
331 mov ($np),%rax
332 adc \$0,%rdx
333
Adam Langleyd9e397b2015-01-22 14:27:53 -0800334 imulq $lo0,$m1 # tp[0]*n0
335 mov %rdx,$hi0
336
Adam Langleyd9e397b2015-01-22 14:27:53 -0800337 mulq $m1 # np[0]*m1
338 add %rax,$lo0 # discarded
339 mov 8($ap),%rax
340 adc \$0,%rdx
341 mov 8(%rsp),$lo0 # tp[1]
342 mov %rdx,$hi1
343
344 lea 1($j),$j # j++
345 jmp .Linner_enter
346
347.align 16
348.Linner:
349 add %rax,$hi1
350 mov ($ap,$j,8),%rax
351 adc \$0,%rdx
352 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
353 mov (%rsp,$j,8),$lo0
354 adc \$0,%rdx
355 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
356 mov %rdx,$hi1
357
358.Linner_enter:
359 mulq $m0 # ap[j]*bp[i]
360 add %rax,$hi0
361 mov ($np,$j,8),%rax
362 adc \$0,%rdx
363 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
364 mov %rdx,$hi0
365 adc \$0,$hi0
366 lea 1($j),$j # j++
367
368 mulq $m1 # np[j]*m1
369 cmp $num,$j
David Benjamin4969cc92016-04-22 15:02:23 -0400370 jne .Linner # note that upon exit $j==$num, so
371 # they can be used interchangeably
Adam Langleyd9e397b2015-01-22 14:27:53 -0800372 add %rax,$hi1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800373 adc \$0,%rdx
374 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
David Benjamin4969cc92016-04-22 15:02:23 -0400375 mov (%rsp,$num,8),$lo0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800376 adc \$0,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -0400377 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800378 mov %rdx,$hi1
379
380 xor %rdx,%rdx
381 add $hi0,$hi1
382 adc \$0,%rdx
383 add $lo0,$hi1 # pull upmost overflow bit
384 adc \$0,%rdx
385 mov $hi1,-8(%rsp,$num,8)
386 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
387
388 lea 1($i),$i # i++
389 cmp $num,$i
390 jb .Louter
391
392 xor $i,$i # i=0 and clear CF!
393 mov (%rsp),%rax # tp[0]
394 lea (%rsp),$ap # borrow ap for tp
395 mov $num,$j # j=num
396 jmp .Lsub
397.align 16
398.Lsub: sbb ($np,$i,8),%rax
399 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
400 mov 8($ap,$i,8),%rax # tp[i+1]
401 lea 1($i),$i # i++
402 dec $j # doesnn't affect CF!
403 jnz .Lsub
404
405 sbb \$0,%rax # handle upmost overflow bit
406 xor $i,$i
Robert Sloana94fe052017-02-21 08:49:28 -0800407 and %rax,$ap
408 not %rax
409 mov $rp,$np
410 and %rax,$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800411 mov $num,$j # j=num
Robert Sloana94fe052017-02-21 08:49:28 -0800412 or $np,$ap # ap=borrow?tp:rp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800413.align 16
414.Lcopy: # copy or in-place refresh
Robert Sloana94fe052017-02-21 08:49:28 -0800415 mov ($ap,$i,8),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800416 mov $i,(%rsp,$i,8) # zap temporary vector
Robert Sloana94fe052017-02-21 08:49:28 -0800417 mov %rax,($rp,$i,8) # rp[i]=tp[i]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800418 lea 1($i),$i
419 sub \$1,$j
420 jnz .Lcopy
421
422 mov 8(%rsp,$num,8),%rsi # restore %rsp
Robert Sloana94fe052017-02-21 08:49:28 -0800423.cfi_def_cfa %rsi,8
Adam Langleyd9e397b2015-01-22 14:27:53 -0800424 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -0400425
Adam Langleyd9e397b2015-01-22 14:27:53 -0800426 mov -48(%rsi),%r15
Robert Sloana94fe052017-02-21 08:49:28 -0800427.cfi_restore %r15
Adam Langleyd9e397b2015-01-22 14:27:53 -0800428 mov -40(%rsi),%r14
Robert Sloana94fe052017-02-21 08:49:28 -0800429.cfi_restore %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -0800430 mov -32(%rsi),%r13
Robert Sloana94fe052017-02-21 08:49:28 -0800431.cfi_restore %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -0800432 mov -24(%rsi),%r12
Robert Sloana94fe052017-02-21 08:49:28 -0800433.cfi_restore %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -0800434 mov -16(%rsi),%rbp
Robert Sloana94fe052017-02-21 08:49:28 -0800435.cfi_restore %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800436 mov -8(%rsi),%rbx
Robert Sloana94fe052017-02-21 08:49:28 -0800437.cfi_restore %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -0800438 lea (%rsi),%rsp
Robert Sloana94fe052017-02-21 08:49:28 -0800439.cfi_def_cfa_register %rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800440.Lmul_epilogue:
441 ret
Robert Sloana94fe052017-02-21 08:49:28 -0800442.cfi_endproc
Adam Langleyd9e397b2015-01-22 14:27:53 -0800443.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
444___
445{{{
446my @A=("%r10","%r11");
447my @N=("%r13","%rdi");
448$code.=<<___;
449.type bn_mul4x_mont_gather5,\@function,6
450.align 32
451bn_mul4x_mont_gather5:
Robert Sloana94fe052017-02-21 08:49:28 -0800452.cfi_startproc
453 .byte 0x67
454 mov %rsp,%rax
455.cfi_def_cfa_register %rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800456.Lmul4x_enter:
457___
458$code.=<<___ if ($addx);
David Benjamin4969cc92016-04-22 15:02:23 -0400459 and \$0x80108,%r11d
460 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800461 je .Lmulx4x_enter
462___
463$code.=<<___;
Adam Langleyd9e397b2015-01-22 14:27:53 -0800464 push %rbx
Robert Sloana94fe052017-02-21 08:49:28 -0800465.cfi_push %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -0800466 push %rbp
Robert Sloana94fe052017-02-21 08:49:28 -0800467.cfi_push %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800468 push %r12
Robert Sloana94fe052017-02-21 08:49:28 -0800469.cfi_push %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -0800470 push %r13
Robert Sloana94fe052017-02-21 08:49:28 -0800471.cfi_push %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -0800472 push %r14
Robert Sloana94fe052017-02-21 08:49:28 -0800473.cfi_push %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -0800474 push %r15
Robert Sloana94fe052017-02-21 08:49:28 -0800475.cfi_push %r15
476.Lmul4x_prologue:
David Benjamin4969cc92016-04-22 15:02:23 -0400477
Adam Langleyd9e397b2015-01-22 14:27:53 -0800478 .byte 0x67
David Benjamin4969cc92016-04-22 15:02:23 -0400479 shl \$3,${num}d # convert $num to bytes
480 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -0800481 neg $num # -$num
482
483 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -0400484 # Ensure that stack frame doesn't alias with $rptr+3*$num
485 # modulo 4096, which covers ret[num], am[num] and n[num]
486 # (see bn_exp.c). This is done to allow memory disambiguation
487 # logic do its magic. [Extra [num] is allocated in order
488 # to align with bn_power5's frame, which is cleansed after
489 # completing exponentiation. Extra 256 bytes is for power mask
490 # calculated from 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800491 #
David Benjamin4969cc92016-04-22 15:02:23 -0400492 lea -320(%rsp,$num,2),%r11
Robert Sloana94fe052017-02-21 08:49:28 -0800493 mov %rsp,%rbp
David Benjamin4969cc92016-04-22 15:02:23 -0400494 sub $rp,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -0800495 and \$4095,%r11
496 cmp %r11,%r10
497 jb .Lmul4xsp_alt
Robert Sloana94fe052017-02-21 08:49:28 -0800498 sub %r11,%rbp # align with $rp
499 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800500 jmp .Lmul4xsp_done
501
502.align 32
503.Lmul4xsp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -0400504 lea 4096-320(,$num,2),%r10
Robert Sloana94fe052017-02-21 08:49:28 -0800505 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800506 sub %r10,%r11
507 mov \$0,%r10
508 cmovc %r10,%r11
Robert Sloana94fe052017-02-21 08:49:28 -0800509 sub %r11,%rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800510.Lmul4xsp_done:
Robert Sloana94fe052017-02-21 08:49:28 -0800511 and \$-64,%rbp
512 mov %rsp,%r11
513 sub %rbp,%r11
514 and \$-4096,%r11
515 lea (%rbp,%r11),%rsp
516 mov (%rsp),%r10
517 cmp %rbp,%rsp
518 ja .Lmul4x_page_walk
519 jmp .Lmul4x_page_walk_done
520
521.Lmul4x_page_walk:
522 lea -4096(%rsp),%rsp
523 mov (%rsp),%r10
524 cmp %rbp,%rsp
525 ja .Lmul4x_page_walk
526.Lmul4x_page_walk_done:
527
Adam Langleyd9e397b2015-01-22 14:27:53 -0800528 neg $num
529
530 mov %rax,40(%rsp)
Robert Sloana94fe052017-02-21 08:49:28 -0800531.cfi_cfa_expression %rsp+40,deref,+8
Adam Langleyd9e397b2015-01-22 14:27:53 -0800532.Lmul4x_body:
533
534 call mul4x_internal
535
536 mov 40(%rsp),%rsi # restore %rsp
Robert Sloana94fe052017-02-21 08:49:28 -0800537.cfi_def_cfa %rsi,8
Adam Langleyd9e397b2015-01-22 14:27:53 -0800538 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -0400539
Adam Langleyd9e397b2015-01-22 14:27:53 -0800540 mov -48(%rsi),%r15
Robert Sloana94fe052017-02-21 08:49:28 -0800541.cfi_restore %r15
Adam Langleyd9e397b2015-01-22 14:27:53 -0800542 mov -40(%rsi),%r14
Robert Sloana94fe052017-02-21 08:49:28 -0800543.cfi_restore %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -0800544 mov -32(%rsi),%r13
Robert Sloana94fe052017-02-21 08:49:28 -0800545.cfi_restore %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -0800546 mov -24(%rsi),%r12
Robert Sloana94fe052017-02-21 08:49:28 -0800547.cfi_restore %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -0800548 mov -16(%rsi),%rbp
Robert Sloana94fe052017-02-21 08:49:28 -0800549.cfi_restore %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800550 mov -8(%rsi),%rbx
Robert Sloana94fe052017-02-21 08:49:28 -0800551.cfi_restore %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -0800552 lea (%rsi),%rsp
Robert Sloana94fe052017-02-21 08:49:28 -0800553.cfi_def_cfa_register %rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800554.Lmul4x_epilogue:
555 ret
Robert Sloana94fe052017-02-21 08:49:28 -0800556.cfi_endproc
Adam Langleyd9e397b2015-01-22 14:27:53 -0800557.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
558
559.type mul4x_internal,\@abi-omnipotent
560.align 32
561mul4x_internal:
David Benjamin4969cc92016-04-22 15:02:23 -0400562 shl \$5,$num # $num was in bytes
563 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
564 lea .Linc(%rip),%rax
565 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800566 shr \$5,$num # restore $num
567___
568 $bp="%r12";
569 $STRIDE=2**5*8; # 5 is "window size"
570 $N=$STRIDE/4; # should match cache line size
571 $tp=$i;
572$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -0400573 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
574 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
575 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
576 lea 128(%rdx),$bp # size optimization
Adam Langleyd9e397b2015-01-22 14:27:53 -0800577
David Benjamin4969cc92016-04-22 15:02:23 -0400578 pshufd \$0,%xmm5,%xmm5 # broadcast index
579 movdqa %xmm1,%xmm4
580 .byte 0x67,0x67
581 movdqa %xmm1,%xmm2
582___
583########################################################################
584# calculate mask by comparing 0..31 to index and save result to stack
585#
586$code.=<<___;
587 paddd %xmm0,%xmm1
588 pcmpeqd %xmm5,%xmm0 # compare to 1,0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800589 .byte 0x67
David Benjamin4969cc92016-04-22 15:02:23 -0400590 movdqa %xmm4,%xmm3
591___
592for($i=0;$i<$STRIDE/16-4;$i+=4) {
593$code.=<<___;
594 paddd %xmm1,%xmm2
595 pcmpeqd %xmm5,%xmm1 # compare to 3,2
596 movdqa %xmm0,`16*($i+0)+112`(%r10)
597 movdqa %xmm4,%xmm0
598
599 paddd %xmm2,%xmm3
600 pcmpeqd %xmm5,%xmm2 # compare to 5,4
601 movdqa %xmm1,`16*($i+1)+112`(%r10)
602 movdqa %xmm4,%xmm1
603
604 paddd %xmm3,%xmm0
605 pcmpeqd %xmm5,%xmm3 # compare to 7,6
606 movdqa %xmm2,`16*($i+2)+112`(%r10)
607 movdqa %xmm4,%xmm2
608
609 paddd %xmm0,%xmm1
610 pcmpeqd %xmm5,%xmm0
611 movdqa %xmm3,`16*($i+3)+112`(%r10)
612 movdqa %xmm4,%xmm3
613___
614}
615$code.=<<___; # last iteration can be optimized
616 paddd %xmm1,%xmm2
617 pcmpeqd %xmm5,%xmm1
618 movdqa %xmm0,`16*($i+0)+112`(%r10)
619
620 paddd %xmm2,%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -0800621 .byte 0x67
David Benjamin4969cc92016-04-22 15:02:23 -0400622 pcmpeqd %xmm5,%xmm2
623 movdqa %xmm1,`16*($i+1)+112`(%r10)
624
625 pcmpeqd %xmm5,%xmm3
626 movdqa %xmm2,`16*($i+2)+112`(%r10)
627 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
628
629 pand `16*($i+1)-128`($bp),%xmm1
630 pand `16*($i+2)-128`($bp),%xmm2
631 movdqa %xmm3,`16*($i+3)+112`(%r10)
632 pand `16*($i+3)-128`($bp),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -0800633 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -0400634 por %xmm3,%xmm1
635___
636for($i=0;$i<$STRIDE/16-4;$i+=4) {
637$code.=<<___;
638 movdqa `16*($i+0)-128`($bp),%xmm4
639 movdqa `16*($i+1)-128`($bp),%xmm5
640 movdqa `16*($i+2)-128`($bp),%xmm2
641 pand `16*($i+0)+112`(%r10),%xmm4
642 movdqa `16*($i+3)-128`($bp),%xmm3
643 pand `16*($i+1)+112`(%r10),%xmm5
644 por %xmm4,%xmm0
645 pand `16*($i+2)+112`(%r10),%xmm2
646 por %xmm5,%xmm1
647 pand `16*($i+3)+112`(%r10),%xmm3
648 por %xmm2,%xmm0
649 por %xmm3,%xmm1
650___
651}
652$code.=<<___;
653 por %xmm1,%xmm0
654 pshufd \$0x4e,%xmm0,%xmm1
655 por %xmm1,%xmm0
656 lea $STRIDE($bp),$bp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800657 movq %xmm0,$m0 # m0=bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400658
Adam Langleyd9e397b2015-01-22 14:27:53 -0800659 mov %r13,16+8(%rsp) # save end of b[num]
660 mov $rp, 56+8(%rsp) # save $rp
661
662 mov ($n0),$n0 # pull n0[0] value
663 mov ($ap),%rax
664 lea ($ap,$num),$ap # end of a[num]
665 neg $num
666
667 mov $n0,$m1
668 mulq $m0 # ap[0]*bp[0]
669 mov %rax,$A[0]
670 mov ($np),%rax
671
Adam Langleyd9e397b2015-01-22 14:27:53 -0800672 imulq $A[0],$m1 # "tp[0]"*n0
David Benjamin4969cc92016-04-22 15:02:23 -0400673 lea 64+8(%rsp),$tp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800674 mov %rdx,$A[1]
675
Adam Langleyd9e397b2015-01-22 14:27:53 -0800676 mulq $m1 # np[0]*m1
677 add %rax,$A[0] # discarded
678 mov 8($ap,$num),%rax
679 adc \$0,%rdx
680 mov %rdx,$N[1]
681
682 mulq $m0
683 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400684 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800685 adc \$0,%rdx
686 mov %rdx,$A[0]
687
688 mulq $m1
689 add %rax,$N[1]
690 mov 16($ap,$num),%rax
691 adc \$0,%rdx
692 add $A[1],$N[1]
693 lea 4*8($num),$j # j=4
David Benjamin4969cc92016-04-22 15:02:23 -0400694 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800695 adc \$0,%rdx
696 mov $N[1],($tp)
697 mov %rdx,$N[0]
698 jmp .L1st4x
699
700.align 32
701.L1st4x:
702 mulq $m0 # ap[j]*bp[0]
703 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400704 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800705 lea 32($tp),$tp
706 adc \$0,%rdx
707 mov %rdx,$A[1]
708
709 mulq $m1 # np[j]*m1
710 add %rax,$N[0]
711 mov -8($ap,$j),%rax
712 adc \$0,%rdx
713 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
714 adc \$0,%rdx
715 mov $N[0],-24($tp) # tp[j-1]
716 mov %rdx,$N[1]
717
718 mulq $m0 # ap[j]*bp[0]
719 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400720 mov -8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800721 adc \$0,%rdx
722 mov %rdx,$A[0]
723
724 mulq $m1 # np[j]*m1
725 add %rax,$N[1]
726 mov ($ap,$j),%rax
727 adc \$0,%rdx
728 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
729 adc \$0,%rdx
730 mov $N[1],-16($tp) # tp[j-1]
731 mov %rdx,$N[0]
732
733 mulq $m0 # ap[j]*bp[0]
734 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400735 mov 8*0($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800736 adc \$0,%rdx
737 mov %rdx,$A[1]
738
739 mulq $m1 # np[j]*m1
740 add %rax,$N[0]
741 mov 8($ap,$j),%rax
742 adc \$0,%rdx
743 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
744 adc \$0,%rdx
745 mov $N[0],-8($tp) # tp[j-1]
746 mov %rdx,$N[1]
747
748 mulq $m0 # ap[j]*bp[0]
749 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400750 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800751 adc \$0,%rdx
752 mov %rdx,$A[0]
753
754 mulq $m1 # np[j]*m1
755 add %rax,$N[1]
756 mov 16($ap,$j),%rax
757 adc \$0,%rdx
758 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400759 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800760 adc \$0,%rdx
761 mov $N[1],($tp) # tp[j-1]
762 mov %rdx,$N[0]
763
764 add \$32,$j # j+=4
765 jnz .L1st4x
766
767 mulq $m0 # ap[j]*bp[0]
768 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400769 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800770 lea 32($tp),$tp
771 adc \$0,%rdx
772 mov %rdx,$A[1]
773
774 mulq $m1 # np[j]*m1
775 add %rax,$N[0]
776 mov -8($ap),%rax
777 adc \$0,%rdx
778 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
779 adc \$0,%rdx
780 mov $N[0],-24($tp) # tp[j-1]
781 mov %rdx,$N[1]
782
783 mulq $m0 # ap[j]*bp[0]
784 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400785 mov -8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800786 adc \$0,%rdx
787 mov %rdx,$A[0]
788
789 mulq $m1 # np[j]*m1
790 add %rax,$N[1]
791 mov ($ap,$num),%rax # ap[0]
792 adc \$0,%rdx
793 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
794 adc \$0,%rdx
795 mov $N[1],-16($tp) # tp[j-1]
796 mov %rdx,$N[0]
797
David Benjamin4969cc92016-04-22 15:02:23 -0400798 lea ($np,$num),$np # rewind $np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800799
800 xor $N[1],$N[1]
801 add $A[0],$N[0]
802 adc \$0,$N[1]
803 mov $N[0],-8($tp)
804
805 jmp .Louter4x
806
807.align 32
808.Louter4x:
David Benjamin4969cc92016-04-22 15:02:23 -0400809 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
810 pxor %xmm4,%xmm4
811 pxor %xmm5,%xmm5
812___
813for($i=0;$i<$STRIDE/16;$i+=4) {
814$code.=<<___;
815 movdqa `16*($i+0)-128`($bp),%xmm0
816 movdqa `16*($i+1)-128`($bp),%xmm1
817 movdqa `16*($i+2)-128`($bp),%xmm2
818 movdqa `16*($i+3)-128`($bp),%xmm3
819 pand `16*($i+0)-128`(%rdx),%xmm0
820 pand `16*($i+1)-128`(%rdx),%xmm1
821 por %xmm0,%xmm4
822 pand `16*($i+2)-128`(%rdx),%xmm2
823 por %xmm1,%xmm5
824 pand `16*($i+3)-128`(%rdx),%xmm3
825 por %xmm2,%xmm4
826 por %xmm3,%xmm5
827___
828}
829$code.=<<___;
830 por %xmm5,%xmm4
831 pshufd \$0x4e,%xmm4,%xmm0
832 por %xmm4,%xmm0
833 lea $STRIDE($bp),$bp
834 movq %xmm0,$m0 # m0=bp[i]
835
Adam Langleyd9e397b2015-01-22 14:27:53 -0800836 mov ($tp,$num),$A[0]
837 mov $n0,$m1
838 mulq $m0 # ap[0]*bp[i]
839 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
840 mov ($np),%rax
841 adc \$0,%rdx
842
Adam Langleyd9e397b2015-01-22 14:27:53 -0800843 imulq $A[0],$m1 # tp[0]*n0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800844 mov %rdx,$A[1]
845 mov $N[1],($tp) # store upmost overflow bit
846
Adam Langleyd9e397b2015-01-22 14:27:53 -0800847 lea ($tp,$num),$tp # rewind $tp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800848
849 mulq $m1 # np[0]*m1
850 add %rax,$A[0] # "$N[0]", discarded
851 mov 8($ap,$num),%rax
852 adc \$0,%rdx
853 mov %rdx,$N[1]
854
855 mulq $m0 # ap[j]*bp[i]
856 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400857 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800858 adc \$0,%rdx
859 add 8($tp),$A[1] # +tp[1]
860 adc \$0,%rdx
861 mov %rdx,$A[0]
862
863 mulq $m1 # np[j]*m1
864 add %rax,$N[1]
865 mov 16($ap,$num),%rax
866 adc \$0,%rdx
867 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
868 lea 4*8($num),$j # j=4
David Benjamin4969cc92016-04-22 15:02:23 -0400869 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800870 adc \$0,%rdx
871 mov %rdx,$N[0]
872 jmp .Linner4x
873
874.align 32
875.Linner4x:
876 mulq $m0 # ap[j]*bp[i]
877 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400878 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800879 adc \$0,%rdx
880 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
881 lea 32($tp),$tp
882 adc \$0,%rdx
883 mov %rdx,$A[1]
884
885 mulq $m1 # np[j]*m1
886 add %rax,$N[0]
887 mov -8($ap,$j),%rax
888 adc \$0,%rdx
889 add $A[0],$N[0]
890 adc \$0,%rdx
891 mov $N[1],-32($tp) # tp[j-1]
892 mov %rdx,$N[1]
893
894 mulq $m0 # ap[j]*bp[i]
895 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400896 mov -8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800897 adc \$0,%rdx
898 add -8($tp),$A[1]
899 adc \$0,%rdx
900 mov %rdx,$A[0]
901
902 mulq $m1 # np[j]*m1
903 add %rax,$N[1]
904 mov ($ap,$j),%rax
905 adc \$0,%rdx
906 add $A[1],$N[1]
907 adc \$0,%rdx
908 mov $N[0],-24($tp) # tp[j-1]
909 mov %rdx,$N[0]
910
911 mulq $m0 # ap[j]*bp[i]
912 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400913 mov 8*0($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800914 adc \$0,%rdx
915 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
916 adc \$0,%rdx
917 mov %rdx,$A[1]
918
919 mulq $m1 # np[j]*m1
920 add %rax,$N[0]
921 mov 8($ap,$j),%rax
922 adc \$0,%rdx
923 add $A[0],$N[0]
924 adc \$0,%rdx
925 mov $N[1],-16($tp) # tp[j-1]
926 mov %rdx,$N[1]
927
928 mulq $m0 # ap[j]*bp[i]
929 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400930 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800931 adc \$0,%rdx
932 add 8($tp),$A[1]
933 adc \$0,%rdx
934 mov %rdx,$A[0]
935
936 mulq $m1 # np[j]*m1
937 add %rax,$N[1]
938 mov 16($ap,$j),%rax
939 adc \$0,%rdx
940 add $A[1],$N[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400941 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800942 adc \$0,%rdx
943 mov $N[0],-8($tp) # tp[j-1]
944 mov %rdx,$N[0]
945
946 add \$32,$j # j+=4
947 jnz .Linner4x
948
949 mulq $m0 # ap[j]*bp[i]
950 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400951 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800952 adc \$0,%rdx
953 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
954 lea 32($tp),$tp
955 adc \$0,%rdx
956 mov %rdx,$A[1]
957
958 mulq $m1 # np[j]*m1
959 add %rax,$N[0]
960 mov -8($ap),%rax
961 adc \$0,%rdx
962 add $A[0],$N[0]
963 adc \$0,%rdx
964 mov $N[1],-32($tp) # tp[j-1]
965 mov %rdx,$N[1]
966
967 mulq $m0 # ap[j]*bp[i]
968 add %rax,$A[1]
969 mov $m1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -0400970 mov -8*1($np),$m1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800971 adc \$0,%rdx
972 add -8($tp),$A[1]
973 adc \$0,%rdx
974 mov %rdx,$A[0]
975
976 mulq $m1 # np[j]*m1
977 add %rax,$N[1]
978 mov ($ap,$num),%rax # ap[0]
979 adc \$0,%rdx
980 add $A[1],$N[1]
981 adc \$0,%rdx
982 mov $N[0],-24($tp) # tp[j-1]
983 mov %rdx,$N[0]
984
Adam Langleyd9e397b2015-01-22 14:27:53 -0800985 mov $N[1],-16($tp) # tp[j-1]
David Benjamin4969cc92016-04-22 15:02:23 -0400986 lea ($np,$num),$np # rewind $np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800987
988 xor $N[1],$N[1]
989 add $A[0],$N[0]
990 adc \$0,$N[1]
991 add ($tp),$N[0] # pull upmost overflow bit
992 adc \$0,$N[1] # upmost overflow bit
993 mov $N[0],-8($tp)
994
995 cmp 16+8(%rsp),$bp
996 jb .Louter4x
997___
998if (1) {
999$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04001000 xor %rax,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001001 sub $N[0],$m1 # compare top-most words
1002 adc $j,$j # $j is zero
1003 or $j,$N[1]
David Benjamin4969cc92016-04-22 15:02:23 -04001004 sub $N[1],%rax # %rax=-$N[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001005 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04001006 mov ($np),%r12
1007 lea ($np),%rbp # nptr in .sqr4x_sub
Adam Langleyd9e397b2015-01-22 14:27:53 -08001008 mov %r9,%rcx
David Benjamin4969cc92016-04-22 15:02:23 -04001009 sar \$3+2,%rcx
Adam Langleyd9e397b2015-01-22 14:27:53 -08001010 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04001011 dec %r12 # so that after 'not' we get -n[0]
1012 xor %r10,%r10
1013 mov 8*1(%rbp),%r13
1014 mov 8*2(%rbp),%r14
1015 mov 8*3(%rbp),%r15
1016 jmp .Lsqr4x_sub_entry
Adam Langleyd9e397b2015-01-22 14:27:53 -08001017___
1018} else {
1019my @ri=("%rax",$bp,$m0,$m1);
1020my $rp="%rdx";
1021$code.=<<___
1022 xor \$1,$N[1]
1023 lea ($tp,$num),$tp # rewind $tp
1024 sar \$5,$num # cf=0
1025 lea ($np,$N[1],8),$np
1026 mov 56+8(%rsp),$rp # restore $rp
1027 jmp .Lsub4x
1028
1029.align 32
1030.Lsub4x:
1031 .byte 0x66
1032 mov 8*0($tp),@ri[0]
1033 mov 8*1($tp),@ri[1]
1034 .byte 0x66
1035 sbb 16*0($np),@ri[0]
1036 mov 8*2($tp),@ri[2]
1037 sbb 16*1($np),@ri[1]
1038 mov 3*8($tp),@ri[3]
1039 lea 4*8($tp),$tp
1040 sbb 16*2($np),@ri[2]
1041 mov @ri[0],8*0($rp)
1042 sbb 16*3($np),@ri[3]
1043 lea 16*4($np),$np
1044 mov @ri[1],8*1($rp)
1045 mov @ri[2],8*2($rp)
1046 mov @ri[3],8*3($rp)
1047 lea 8*4($rp),$rp
1048
1049 inc $num
1050 jnz .Lsub4x
1051
1052 ret
1053___
1054}
1055$code.=<<___;
1056.size mul4x_internal,.-mul4x_internal
1057___
1058}}}
1059 {{{
1060######################################################################
1061# void bn_power5(
1062my $rptr="%rdi"; # BN_ULONG *rptr,
1063my $aptr="%rsi"; # const BN_ULONG *aptr,
1064my $bptr="%rdx"; # const void *table,
1065my $nptr="%rcx"; # const BN_ULONG *nptr,
1066my $n0 ="%r8"; # const BN_ULONG *n0);
1067my $num ="%r9"; # int num, has to be divisible by 8
Robert Sloana94fe052017-02-21 08:49:28 -08001068 # int pwr
Adam Langleyd9e397b2015-01-22 14:27:53 -08001069
1070my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1071my @A0=("%r10","%r11");
1072my @A1=("%r12","%r13");
1073my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
1074
1075$code.=<<___;
1076.globl bn_power5
1077.type bn_power5,\@function,6
1078.align 32
1079bn_power5:
Robert Sloana94fe052017-02-21 08:49:28 -08001080.cfi_startproc
1081 mov %rsp,%rax
1082.cfi_def_cfa_register %rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001083___
1084$code.=<<___ if ($addx);
1085 mov OPENSSL_ia32cap_P+8(%rip),%r11d
David Benjamin4969cc92016-04-22 15:02:23 -04001086 and \$0x80108,%r11d
1087 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -08001088 je .Lpowerx5_enter
1089___
1090$code.=<<___;
Adam Langleyd9e397b2015-01-22 14:27:53 -08001091 push %rbx
Robert Sloana94fe052017-02-21 08:49:28 -08001092.cfi_push %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08001093 push %rbp
Robert Sloana94fe052017-02-21 08:49:28 -08001094.cfi_push %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08001095 push %r12
Robert Sloana94fe052017-02-21 08:49:28 -08001096.cfi_push %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08001097 push %r13
Robert Sloana94fe052017-02-21 08:49:28 -08001098.cfi_push %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08001099 push %r14
Robert Sloana94fe052017-02-21 08:49:28 -08001100.cfi_push %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08001101 push %r15
Robert Sloana94fe052017-02-21 08:49:28 -08001102.cfi_push %r15
1103.Lpower5_prologue:
David Benjamin4969cc92016-04-22 15:02:23 -04001104
Adam Langleyd9e397b2015-01-22 14:27:53 -08001105 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04001106 lea ($num,$num,2),%r10d # 3*$num
Adam Langleyd9e397b2015-01-22 14:27:53 -08001107 neg $num
1108 mov ($n0),$n0 # *n0
1109
1110 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04001111 # Ensure that stack frame doesn't alias with $rptr+3*$num
1112 # modulo 4096, which covers ret[num], am[num] and n[num]
1113 # (see bn_exp.c). This is done to allow memory disambiguation
1114 # logic do its magic. [Extra 256 bytes is for power mask
1115 # calculated from 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001116 #
David Benjamin4969cc92016-04-22 15:02:23 -04001117 lea -320(%rsp,$num,2),%r11
Robert Sloana94fe052017-02-21 08:49:28 -08001118 mov %rsp,%rbp
David Benjamin4969cc92016-04-22 15:02:23 -04001119 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08001120 and \$4095,%r11
1121 cmp %r11,%r10
1122 jb .Lpwr_sp_alt
Robert Sloana94fe052017-02-21 08:49:28 -08001123 sub %r11,%rbp # align with $aptr
1124 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08001125 jmp .Lpwr_sp_done
1126
1127.align 32
1128.Lpwr_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04001129 lea 4096-320(,$num,2),%r10
Robert Sloana94fe052017-02-21 08:49:28 -08001130 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08001131 sub %r10,%r11
1132 mov \$0,%r10
1133 cmovc %r10,%r11
Robert Sloana94fe052017-02-21 08:49:28 -08001134 sub %r11,%rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08001135.Lpwr_sp_done:
Robert Sloana94fe052017-02-21 08:49:28 -08001136 and \$-64,%rbp
1137 mov %rsp,%r11
1138 sub %rbp,%r11
1139 and \$-4096,%r11
1140 lea (%rbp,%r11),%rsp
1141 mov (%rsp),%r10
1142 cmp %rbp,%rsp
1143 ja .Lpwr_page_walk
1144 jmp .Lpwr_page_walk_done
1145
1146.Lpwr_page_walk:
1147 lea -4096(%rsp),%rsp
1148 mov (%rsp),%r10
1149 cmp %rbp,%rsp
1150 ja .Lpwr_page_walk
1151.Lpwr_page_walk_done:
1152
1153 mov $num,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08001154 neg $num
1155
1156 ##############################################################
1157 # Stack layout
1158 #
1159 # +0 saved $num, used in reduction section
1160 # +8 &t[2*$num], used in reduction section
1161 # +32 saved *n0
1162 # +40 saved %rsp
1163 # +48 t[2*$num]
1164 #
1165 mov $n0, 32(%rsp)
1166 mov %rax, 40(%rsp) # save original %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08001167.cfi_cfa_expression %rsp+40,deref,+8
Adam Langleyd9e397b2015-01-22 14:27:53 -08001168.Lpower5_body:
David Benjamin4969cc92016-04-22 15:02:23 -04001169 movq $rptr,%xmm1 # save $rptr, used in sqr8x
Adam Langleyd9e397b2015-01-22 14:27:53 -08001170 movq $nptr,%xmm2 # save $nptr
David Benjamin4969cc92016-04-22 15:02:23 -04001171 movq %r10, %xmm3 # -$num, used in sqr8x
Adam Langleyd9e397b2015-01-22 14:27:53 -08001172 movq $bptr,%xmm4
1173
1174 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001175 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001176 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001177 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001178 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001179 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001180 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001181 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001182 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001183 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001184
1185 movq %xmm2,$nptr
1186 movq %xmm4,$bptr
1187 mov $aptr,$rptr
1188 mov 40(%rsp),%rax
1189 lea 32(%rsp),$n0
1190
1191 call mul4x_internal
1192
1193 mov 40(%rsp),%rsi # restore %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08001194.cfi_def_cfa %rsi,8
Adam Langleyd9e397b2015-01-22 14:27:53 -08001195 mov \$1,%rax
1196 mov -48(%rsi),%r15
Robert Sloana94fe052017-02-21 08:49:28 -08001197.cfi_restore %r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08001198 mov -40(%rsi),%r14
Robert Sloana94fe052017-02-21 08:49:28 -08001199.cfi_restore %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08001200 mov -32(%rsi),%r13
Robert Sloana94fe052017-02-21 08:49:28 -08001201.cfi_restore %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08001202 mov -24(%rsi),%r12
Robert Sloana94fe052017-02-21 08:49:28 -08001203.cfi_restore %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08001204 mov -16(%rsi),%rbp
Robert Sloana94fe052017-02-21 08:49:28 -08001205.cfi_restore %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08001206 mov -8(%rsi),%rbx
Robert Sloana94fe052017-02-21 08:49:28 -08001207.cfi_restore %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08001208 lea (%rsi),%rsp
Robert Sloana94fe052017-02-21 08:49:28 -08001209.cfi_def_cfa_register %rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08001210.Lpower5_epilogue:
1211 ret
Robert Sloana94fe052017-02-21 08:49:28 -08001212.cfi_endproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08001213.size bn_power5,.-bn_power5
1214
1215.globl bn_sqr8x_internal
1216.hidden bn_sqr8x_internal
1217.type bn_sqr8x_internal,\@abi-omnipotent
1218.align 32
1219bn_sqr8x_internal:
1220__bn_sqr8x_internal:
1221 ##############################################################
1222 # Squaring part:
1223 #
1224 # a) multiply-n-add everything but a[i]*a[i];
1225 # b) shift result of a) by 1 to the left and accumulate
1226 # a[i]*a[i] products;
1227 #
1228 ##############################################################
1229 # a[1]a[0]
1230 # a[2]a[0]
1231 # a[3]a[0]
1232 # a[2]a[1]
1233 # a[4]a[0]
1234 # a[3]a[1]
1235 # a[5]a[0]
1236 # a[4]a[1]
1237 # a[3]a[2]
1238 # a[6]a[0]
1239 # a[5]a[1]
1240 # a[4]a[2]
1241 # a[7]a[0]
1242 # a[6]a[1]
1243 # a[5]a[2]
1244 # a[4]a[3]
1245 # a[7]a[1]
1246 # a[6]a[2]
1247 # a[5]a[3]
1248 # a[7]a[2]
1249 # a[6]a[3]
1250 # a[5]a[4]
1251 # a[7]a[3]
1252 # a[6]a[4]
1253 # a[7]a[4]
1254 # a[6]a[5]
1255 # a[7]a[5]
1256 # a[7]a[6]
1257 # a[1]a[0]
1258 # a[2]a[0]
1259 # a[3]a[0]
1260 # a[4]a[0]
1261 # a[5]a[0]
1262 # a[6]a[0]
1263 # a[7]a[0]
1264 # a[2]a[1]
1265 # a[3]a[1]
1266 # a[4]a[1]
1267 # a[5]a[1]
1268 # a[6]a[1]
1269 # a[7]a[1]
1270 # a[3]a[2]
1271 # a[4]a[2]
1272 # a[5]a[2]
1273 # a[6]a[2]
1274 # a[7]a[2]
1275 # a[4]a[3]
1276 # a[5]a[3]
1277 # a[6]a[3]
1278 # a[7]a[3]
1279 # a[5]a[4]
1280 # a[6]a[4]
1281 # a[7]a[4]
1282 # a[6]a[5]
1283 # a[7]a[5]
1284 # a[7]a[6]
1285 # a[0]a[0]
1286 # a[1]a[1]
1287 # a[2]a[2]
1288 # a[3]a[3]
1289 # a[4]a[4]
1290 # a[5]a[5]
1291 # a[6]a[6]
1292 # a[7]a[7]
1293
1294 lea 32(%r10),$i # $i=-($num-32)
1295 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1296
1297 mov $num,$j # $j=$num
1298
1299 # comments apply to $num==8 case
1300 mov -32($aptr,$i),$a0 # a[0]
1301 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1302 mov -24($aptr,$i),%rax # a[1]
1303 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1304 mov -16($aptr,$i),$ai # a[2]
1305 mov %rax,$a1
1306
1307 mul $a0 # a[1]*a[0]
1308 mov %rax,$A0[0] # a[1]*a[0]
1309 mov $ai,%rax # a[2]
1310 mov %rdx,$A0[1]
1311 mov $A0[0],-24($tptr,$i) # t[1]
1312
1313 mul $a0 # a[2]*a[0]
1314 add %rax,$A0[1]
1315 mov $ai,%rax
1316 adc \$0,%rdx
1317 mov $A0[1],-16($tptr,$i) # t[2]
1318 mov %rdx,$A0[0]
1319
1320
1321 mov -8($aptr,$i),$ai # a[3]
1322 mul $a1 # a[2]*a[1]
1323 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1324 mov $ai,%rax
1325 mov %rdx,$A1[1]
1326
1327 lea ($i),$j
1328 mul $a0 # a[3]*a[0]
1329 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1330 mov $ai,%rax
1331 mov %rdx,$A0[1]
1332 adc \$0,$A0[1]
1333 add $A1[0],$A0[0]
1334 adc \$0,$A0[1]
1335 mov $A0[0],-8($tptr,$j) # t[3]
1336 jmp .Lsqr4x_1st
1337
1338.align 32
1339.Lsqr4x_1st:
1340 mov ($aptr,$j),$ai # a[4]
1341 mul $a1 # a[3]*a[1]
1342 add %rax,$A1[1] # a[3]*a[1]+t[4]
1343 mov $ai,%rax
1344 mov %rdx,$A1[0]
1345 adc \$0,$A1[0]
1346
1347 mul $a0 # a[4]*a[0]
1348 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1349 mov $ai,%rax # a[3]
1350 mov 8($aptr,$j),$ai # a[5]
1351 mov %rdx,$A0[0]
1352 adc \$0,$A0[0]
1353 add $A1[1],$A0[1]
1354 adc \$0,$A0[0]
1355
1356
1357 mul $a1 # a[4]*a[3]
1358 add %rax,$A1[0] # a[4]*a[3]+t[5]
1359 mov $ai,%rax
1360 mov $A0[1],($tptr,$j) # t[4]
1361 mov %rdx,$A1[1]
1362 adc \$0,$A1[1]
1363
1364 mul $a0 # a[5]*a[2]
1365 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1366 mov $ai,%rax
1367 mov 16($aptr,$j),$ai # a[6]
1368 mov %rdx,$A0[1]
1369 adc \$0,$A0[1]
1370 add $A1[0],$A0[0]
1371 adc \$0,$A0[1]
1372
1373 mul $a1 # a[5]*a[3]
1374 add %rax,$A1[1] # a[5]*a[3]+t[6]
1375 mov $ai,%rax
1376 mov $A0[0],8($tptr,$j) # t[5]
1377 mov %rdx,$A1[0]
1378 adc \$0,$A1[0]
1379
1380 mul $a0 # a[6]*a[2]
1381 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1382 mov $ai,%rax # a[3]
1383 mov 24($aptr,$j),$ai # a[7]
1384 mov %rdx,$A0[0]
1385 adc \$0,$A0[0]
1386 add $A1[1],$A0[1]
1387 adc \$0,$A0[0]
1388
1389
1390 mul $a1 # a[6]*a[5]
1391 add %rax,$A1[0] # a[6]*a[5]+t[7]
1392 mov $ai,%rax
1393 mov $A0[1],16($tptr,$j) # t[6]
1394 mov %rdx,$A1[1]
1395 adc \$0,$A1[1]
1396 lea 32($j),$j
1397
1398 mul $a0 # a[7]*a[4]
1399 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
1400 mov $ai,%rax
1401 mov %rdx,$A0[1]
1402 adc \$0,$A0[1]
1403 add $A1[0],$A0[0]
1404 adc \$0,$A0[1]
1405 mov $A0[0],-8($tptr,$j) # t[7]
1406
1407 cmp \$0,$j
1408 jne .Lsqr4x_1st
1409
1410 mul $a1 # a[7]*a[5]
1411 add %rax,$A1[1]
1412 lea 16($i),$i
1413 adc \$0,%rdx
1414 add $A0[1],$A1[1]
1415 adc \$0,%rdx
1416
1417 mov $A1[1],($tptr) # t[8]
1418 mov %rdx,$A1[0]
1419 mov %rdx,8($tptr) # t[9]
1420 jmp .Lsqr4x_outer
1421
1422.align 32
1423.Lsqr4x_outer: # comments apply to $num==6 case
1424 mov -32($aptr,$i),$a0 # a[0]
1425 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1426 mov -24($aptr,$i),%rax # a[1]
1427 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1428 mov -16($aptr,$i),$ai # a[2]
1429 mov %rax,$a1
1430
1431 mul $a0 # a[1]*a[0]
1432 mov -24($tptr,$i),$A0[0] # t[1]
1433 add %rax,$A0[0] # a[1]*a[0]+t[1]
1434 mov $ai,%rax # a[2]
1435 adc \$0,%rdx
1436 mov $A0[0],-24($tptr,$i) # t[1]
1437 mov %rdx,$A0[1]
1438
1439 mul $a0 # a[2]*a[0]
1440 add %rax,$A0[1]
1441 mov $ai,%rax
1442 adc \$0,%rdx
1443 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1444 mov %rdx,$A0[0]
1445 adc \$0,$A0[0]
1446 mov $A0[1],-16($tptr,$i) # t[2]
1447
1448 xor $A1[0],$A1[0]
1449
1450 mov -8($aptr,$i),$ai # a[3]
1451 mul $a1 # a[2]*a[1]
1452 add %rax,$A1[0] # a[2]*a[1]+t[3]
1453 mov $ai,%rax
1454 adc \$0,%rdx
1455 add -8($tptr,$i),$A1[0]
1456 mov %rdx,$A1[1]
1457 adc \$0,$A1[1]
1458
1459 mul $a0 # a[3]*a[0]
1460 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1461 mov $ai,%rax
1462 adc \$0,%rdx
1463 add $A1[0],$A0[0]
1464 mov %rdx,$A0[1]
1465 adc \$0,$A0[1]
1466 mov $A0[0],-8($tptr,$i) # t[3]
1467
1468 lea ($i),$j
1469 jmp .Lsqr4x_inner
1470
1471.align 32
1472.Lsqr4x_inner:
1473 mov ($aptr,$j),$ai # a[4]
1474 mul $a1 # a[3]*a[1]
1475 add %rax,$A1[1] # a[3]*a[1]+t[4]
1476 mov $ai,%rax
1477 mov %rdx,$A1[0]
1478 adc \$0,$A1[0]
1479 add ($tptr,$j),$A1[1]
1480 adc \$0,$A1[0]
1481
1482 .byte 0x67
1483 mul $a0 # a[4]*a[0]
1484 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1485 mov $ai,%rax # a[3]
1486 mov 8($aptr,$j),$ai # a[5]
1487 mov %rdx,$A0[0]
1488 adc \$0,$A0[0]
1489 add $A1[1],$A0[1]
1490 adc \$0,$A0[0]
1491
1492 mul $a1 # a[4]*a[3]
1493 add %rax,$A1[0] # a[4]*a[3]+t[5]
1494 mov $A0[1],($tptr,$j) # t[4]
1495 mov $ai,%rax
1496 mov %rdx,$A1[1]
1497 adc \$0,$A1[1]
1498 add 8($tptr,$j),$A1[0]
1499 lea 16($j),$j # j++
1500 adc \$0,$A1[1]
1501
1502 mul $a0 # a[5]*a[2]
1503 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1504 mov $ai,%rax
1505 adc \$0,%rdx
1506 add $A1[0],$A0[0]
1507 mov %rdx,$A0[1]
1508 adc \$0,$A0[1]
1509 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1510
1511 cmp \$0,$j
1512 jne .Lsqr4x_inner
1513
1514 .byte 0x67
1515 mul $a1 # a[5]*a[3]
1516 add %rax,$A1[1]
1517 adc \$0,%rdx
1518 add $A0[1],$A1[1]
1519 adc \$0,%rdx
1520
1521 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1522 mov %rdx,$A1[0]
1523 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1524
1525 add \$16,$i
1526 jnz .Lsqr4x_outer
1527
1528 # comments apply to $num==4 case
1529 mov -32($aptr),$a0 # a[0]
1530 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1531 mov -24($aptr),%rax # a[1]
1532 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1533 mov -16($aptr),$ai # a[2]
1534 mov %rax,$a1
1535
1536 mul $a0 # a[1]*a[0]
1537 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1538 mov $ai,%rax # a[2]
1539 mov %rdx,$A0[1]
1540 adc \$0,$A0[1]
1541
1542 mul $a0 # a[2]*a[0]
1543 add %rax,$A0[1]
1544 mov $ai,%rax
1545 mov $A0[0],-24($tptr) # t[1]
1546 mov %rdx,$A0[0]
1547 adc \$0,$A0[0]
1548 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1549 mov -8($aptr),$ai # a[3]
1550 adc \$0,$A0[0]
1551
1552 mul $a1 # a[2]*a[1]
1553 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1554 mov $ai,%rax
1555 mov $A0[1],-16($tptr) # t[2]
1556 mov %rdx,$A1[1]
1557 adc \$0,$A1[1]
1558
1559 mul $a0 # a[3]*a[0]
1560 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1561 mov $ai,%rax
1562 mov %rdx,$A0[1]
1563 adc \$0,$A0[1]
1564 add $A1[0],$A0[0]
1565 adc \$0,$A0[1]
1566 mov $A0[0],-8($tptr) # t[3]
1567
1568 mul $a1 # a[3]*a[1]
1569 add %rax,$A1[1]
1570 mov -16($aptr),%rax # a[2]
1571 adc \$0,%rdx
1572 add $A0[1],$A1[1]
1573 adc \$0,%rdx
1574
1575 mov $A1[1],($tptr) # t[4]
1576 mov %rdx,$A1[0]
1577 mov %rdx,8($tptr) # t[5]
1578
1579 mul $ai # a[2]*a[3]
1580___
1581{
1582my ($shift,$carry)=($a0,$a1);
1583my @S=(@A1,$ai,$n0);
1584$code.=<<___;
1585 add \$16,$i
1586 xor $shift,$shift
1587 sub $num,$i # $i=16-$num
1588 xor $carry,$carry
1589
1590 add $A1[0],%rax # t[5]
1591 adc \$0,%rdx
1592 mov %rax,8($tptr) # t[5]
1593 mov %rdx,16($tptr) # t[6]
1594 mov $carry,24($tptr) # t[7]
1595
1596 mov -16($aptr,$i),%rax # a[0]
1597 lea 48+8(%rsp),$tptr
1598 xor $A0[0],$A0[0] # t[0]
1599 mov 8($tptr),$A0[1] # t[1]
1600
1601 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1602 shr \$63,$A0[0]
1603 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1604 shr \$63,$A0[1]
1605 or $A0[0],$S[1] # | t[2*i]>>63
1606 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1607 mov $A0[1],$shift # shift=t[2*i+1]>>63
1608 mul %rax # a[i]*a[i]
1609 neg $carry # mov $carry,cf
1610 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1611 adc %rax,$S[0]
1612 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1613 mov $S[0],($tptr)
1614 adc %rdx,$S[1]
1615
1616 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1617 mov $S[1],8($tptr)
1618 sbb $carry,$carry # mov cf,$carry
1619 shr \$63,$A0[0]
1620 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1621 shr \$63,$A0[1]
1622 or $A0[0],$S[3] # | t[2*i]>>63
1623 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1624 mov $A0[1],$shift # shift=t[2*i+1]>>63
1625 mul %rax # a[i]*a[i]
1626 neg $carry # mov $carry,cf
1627 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1628 adc %rax,$S[2]
1629 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1630 mov $S[2],16($tptr)
1631 adc %rdx,$S[3]
1632 lea 16($i),$i
1633 mov $S[3],24($tptr)
1634 sbb $carry,$carry # mov cf,$carry
1635 lea 64($tptr),$tptr
1636 jmp .Lsqr4x_shift_n_add
1637
1638.align 32
1639.Lsqr4x_shift_n_add:
1640 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1641 shr \$63,$A0[0]
1642 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1643 shr \$63,$A0[1]
1644 or $A0[0],$S[1] # | t[2*i]>>63
1645 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1646 mov $A0[1],$shift # shift=t[2*i+1]>>63
1647 mul %rax # a[i]*a[i]
1648 neg $carry # mov $carry,cf
1649 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1650 adc %rax,$S[0]
1651 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1652 mov $S[0],-32($tptr)
1653 adc %rdx,$S[1]
1654
1655 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1656 mov $S[1],-24($tptr)
1657 sbb $carry,$carry # mov cf,$carry
1658 shr \$63,$A0[0]
1659 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1660 shr \$63,$A0[1]
1661 or $A0[0],$S[3] # | t[2*i]>>63
1662 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1663 mov $A0[1],$shift # shift=t[2*i+1]>>63
1664 mul %rax # a[i]*a[i]
1665 neg $carry # mov $carry,cf
1666 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1667 adc %rax,$S[2]
1668 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1669 mov $S[2],-16($tptr)
1670 adc %rdx,$S[3]
1671
1672 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1673 mov $S[3],-8($tptr)
1674 sbb $carry,$carry # mov cf,$carry
1675 shr \$63,$A0[0]
1676 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1677 shr \$63,$A0[1]
1678 or $A0[0],$S[1] # | t[2*i]>>63
1679 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1680 mov $A0[1],$shift # shift=t[2*i+1]>>63
1681 mul %rax # a[i]*a[i]
1682 neg $carry # mov $carry,cf
1683 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1684 adc %rax,$S[0]
1685 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1686 mov $S[0],0($tptr)
1687 adc %rdx,$S[1]
1688
1689 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1690 mov $S[1],8($tptr)
1691 sbb $carry,$carry # mov cf,$carry
1692 shr \$63,$A0[0]
1693 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1694 shr \$63,$A0[1]
1695 or $A0[0],$S[3] # | t[2*i]>>63
1696 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1697 mov $A0[1],$shift # shift=t[2*i+1]>>63
1698 mul %rax # a[i]*a[i]
1699 neg $carry # mov $carry,cf
1700 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1701 adc %rax,$S[2]
1702 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1703 mov $S[2],16($tptr)
1704 adc %rdx,$S[3]
1705 mov $S[3],24($tptr)
1706 sbb $carry,$carry # mov cf,$carry
1707 lea 64($tptr),$tptr
1708 add \$32,$i
1709 jnz .Lsqr4x_shift_n_add
1710
1711 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1712 .byte 0x67
1713 shr \$63,$A0[0]
1714 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1715 shr \$63,$A0[1]
1716 or $A0[0],$S[1] # | t[2*i]>>63
1717 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1718 mov $A0[1],$shift # shift=t[2*i+1]>>63
1719 mul %rax # a[i]*a[i]
1720 neg $carry # mov $carry,cf
1721 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1722 adc %rax,$S[0]
1723 mov -8($aptr),%rax # a[i+1] # prefetch
1724 mov $S[0],-32($tptr)
1725 adc %rdx,$S[1]
1726
1727 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1728 mov $S[1],-24($tptr)
1729 sbb $carry,$carry # mov cf,$carry
1730 shr \$63,$A0[0]
1731 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1732 shr \$63,$A0[1]
1733 or $A0[0],$S[3] # | t[2*i]>>63
1734 mul %rax # a[i]*a[i]
1735 neg $carry # mov $carry,cf
1736 adc %rax,$S[2]
1737 adc %rdx,$S[3]
1738 mov $S[2],-16($tptr)
1739 mov $S[3],-8($tptr)
1740___
1741}
1742######################################################################
1743# Montgomery reduction part, "word-by-word" algorithm.
1744#
1745# This new path is inspired by multiple submissions from Intel, by
1746# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1747# Vinodh Gopal...
1748{
1749my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1750
1751$code.=<<___;
1752 movq %xmm2,$nptr
David Benjamin4969cc92016-04-22 15:02:23 -04001753__bn_sqr8x_reduction:
Adam Langleyd9e397b2015-01-22 14:27:53 -08001754 xor %rax,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04001755 lea ($nptr,$num),%rcx # end of n[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001756 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1757 mov %rcx,0+8(%rsp)
1758 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1759 mov %rdx,8+8(%rsp)
1760 neg $num
1761 jmp .L8x_reduction_loop
1762
1763.align 32
1764.L8x_reduction_loop:
1765 lea ($tptr,$num),$tptr # start of current t[] window
1766 .byte 0x66
1767 mov 8*0($tptr),$m0
1768 mov 8*1($tptr),%r9
1769 mov 8*2($tptr),%r10
1770 mov 8*3($tptr),%r11
1771 mov 8*4($tptr),%r12
1772 mov 8*5($tptr),%r13
1773 mov 8*6($tptr),%r14
1774 mov 8*7($tptr),%r15
1775 mov %rax,(%rdx) # store top-most carry bit
1776 lea 8*8($tptr),$tptr
1777
1778 .byte 0x67
1779 mov $m0,%r8
1780 imulq 32+8(%rsp),$m0 # n0*a[0]
David Benjamin4969cc92016-04-22 15:02:23 -04001781 mov 8*0($nptr),%rax # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001782 mov \$8,%ecx
1783 jmp .L8x_reduce
1784
1785.align 32
1786.L8x_reduce:
1787 mulq $m0
David Benjamin4969cc92016-04-22 15:02:23 -04001788 mov 8*1($nptr),%rax # n[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001789 neg %r8
1790 mov %rdx,%r8
1791 adc \$0,%r8
1792
1793 mulq $m0
1794 add %rax,%r9
David Benjamin4969cc92016-04-22 15:02:23 -04001795 mov 8*2($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001796 adc \$0,%rdx
1797 add %r9,%r8
1798 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1799 mov %rdx,%r9
1800 adc \$0,%r9
1801
1802 mulq $m0
1803 add %rax,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04001804 mov 8*3($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001805 adc \$0,%rdx
1806 add %r10,%r9
1807 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1808 mov %rdx,%r10
1809 adc \$0,%r10
1810
1811 mulq $m0
1812 add %rax,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04001813 mov 8*4($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001814 adc \$0,%rdx
1815 imulq %r8,$carry # modulo-scheduled
1816 add %r11,%r10
1817 mov %rdx,%r11
1818 adc \$0,%r11
1819
1820 mulq $m0
1821 add %rax,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04001822 mov 8*5($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001823 adc \$0,%rdx
1824 add %r12,%r11
1825 mov %rdx,%r12
1826 adc \$0,%r12
1827
1828 mulq $m0
1829 add %rax,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04001830 mov 8*6($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001831 adc \$0,%rdx
1832 add %r13,%r12
1833 mov %rdx,%r13
1834 adc \$0,%r13
1835
1836 mulq $m0
1837 add %rax,%r14
David Benjamin4969cc92016-04-22 15:02:23 -04001838 mov 8*7($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001839 adc \$0,%rdx
1840 add %r14,%r13
1841 mov %rdx,%r14
1842 adc \$0,%r14
1843
1844 mulq $m0
1845 mov $carry,$m0 # n0*a[i]
1846 add %rax,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04001847 mov 8*0($nptr),%rax # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001848 adc \$0,%rdx
1849 add %r15,%r14
1850 mov %rdx,%r15
1851 adc \$0,%r15
1852
1853 dec %ecx
1854 jnz .L8x_reduce
1855
David Benjamin4969cc92016-04-22 15:02:23 -04001856 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08001857 xor %rax,%rax
1858 mov 8+8(%rsp),%rdx # pull end of t[]
1859 cmp 0+8(%rsp),$nptr # end of n[]?
1860 jae .L8x_no_tail
1861
1862 .byte 0x66
1863 add 8*0($tptr),%r8
1864 adc 8*1($tptr),%r9
1865 adc 8*2($tptr),%r10
1866 adc 8*3($tptr),%r11
1867 adc 8*4($tptr),%r12
1868 adc 8*5($tptr),%r13
1869 adc 8*6($tptr),%r14
1870 adc 8*7($tptr),%r15
1871 sbb $carry,$carry # top carry
1872
1873 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1874 mov \$8,%ecx
David Benjamin4969cc92016-04-22 15:02:23 -04001875 mov 8*0($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001876 jmp .L8x_tail
1877
1878.align 32
1879.L8x_tail:
1880 mulq $m0
1881 add %rax,%r8
David Benjamin4969cc92016-04-22 15:02:23 -04001882 mov 8*1($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001883 mov %r8,($tptr) # save result
1884 mov %rdx,%r8
1885 adc \$0,%r8
1886
1887 mulq $m0
1888 add %rax,%r9
David Benjamin4969cc92016-04-22 15:02:23 -04001889 mov 8*2($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001890 adc \$0,%rdx
1891 add %r9,%r8
1892 lea 8($tptr),$tptr # $tptr++
1893 mov %rdx,%r9
1894 adc \$0,%r9
1895
1896 mulq $m0
1897 add %rax,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04001898 mov 8*3($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001899 adc \$0,%rdx
1900 add %r10,%r9
1901 mov %rdx,%r10
1902 adc \$0,%r10
1903
1904 mulq $m0
1905 add %rax,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04001906 mov 8*4($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001907 adc \$0,%rdx
1908 add %r11,%r10
1909 mov %rdx,%r11
1910 adc \$0,%r11
1911
1912 mulq $m0
1913 add %rax,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04001914 mov 8*5($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001915 adc \$0,%rdx
1916 add %r12,%r11
1917 mov %rdx,%r12
1918 adc \$0,%r12
1919
1920 mulq $m0
1921 add %rax,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04001922 mov 8*6($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001923 adc \$0,%rdx
1924 add %r13,%r12
1925 mov %rdx,%r13
1926 adc \$0,%r13
1927
1928 mulq $m0
1929 add %rax,%r14
David Benjamin4969cc92016-04-22 15:02:23 -04001930 mov 8*7($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001931 adc \$0,%rdx
1932 add %r14,%r13
1933 mov %rdx,%r14
1934 adc \$0,%r14
1935
1936 mulq $m0
1937 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1938 add %rax,%r15
1939 adc \$0,%rdx
1940 add %r15,%r14
David Benjamin4969cc92016-04-22 15:02:23 -04001941 mov 8*0($nptr),%rax # pull n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001942 mov %rdx,%r15
1943 adc \$0,%r15
1944
1945 dec %ecx
1946 jnz .L8x_tail
1947
David Benjamin4969cc92016-04-22 15:02:23 -04001948 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08001949 mov 8+8(%rsp),%rdx # pull end of t[]
1950 cmp 0+8(%rsp),$nptr # end of n[]?
1951 jae .L8x_tail_done # break out of loop
1952
1953 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1954 neg $carry
1955 mov 8*0($nptr),%rax # pull n[0]
1956 adc 8*0($tptr),%r8
1957 adc 8*1($tptr),%r9
1958 adc 8*2($tptr),%r10
1959 adc 8*3($tptr),%r11
1960 adc 8*4($tptr),%r12
1961 adc 8*5($tptr),%r13
1962 adc 8*6($tptr),%r14
1963 adc 8*7($tptr),%r15
1964 sbb $carry,$carry # top carry
1965
1966 mov \$8,%ecx
1967 jmp .L8x_tail
1968
1969.align 32
1970.L8x_tail_done:
Robert Sloan4d1ac502017-02-06 08:36:14 -08001971 xor %rax,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001972 add (%rdx),%r8 # can this overflow?
Adam Langley4139edb2016-01-13 15:00:54 -08001973 adc \$0,%r9
1974 adc \$0,%r10
1975 adc \$0,%r11
1976 adc \$0,%r12
1977 adc \$0,%r13
1978 adc \$0,%r14
Robert Sloan4d1ac502017-02-06 08:36:14 -08001979 adc \$0,%r15
1980 adc \$0,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001981
1982 neg $carry
1983.L8x_no_tail:
1984 adc 8*0($tptr),%r8
1985 adc 8*1($tptr),%r9
1986 adc 8*2($tptr),%r10
1987 adc 8*3($tptr),%r11
1988 adc 8*4($tptr),%r12
1989 adc 8*5($tptr),%r13
1990 adc 8*6($tptr),%r14
1991 adc 8*7($tptr),%r15
1992 adc \$0,%rax # top-most carry
David Benjamin4969cc92016-04-22 15:02:23 -04001993 mov -8($nptr),%rcx # np[num-1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001994 xor $carry,$carry
1995
1996 movq %xmm2,$nptr # restore $nptr
1997
1998 mov %r8,8*0($tptr) # store top 512 bits
1999 mov %r9,8*1($tptr)
2000 movq %xmm3,$num # $num is %r9, can't be moved upwards
2001 mov %r10,8*2($tptr)
2002 mov %r11,8*3($tptr)
2003 mov %r12,8*4($tptr)
2004 mov %r13,8*5($tptr)
2005 mov %r14,8*6($tptr)
2006 mov %r15,8*7($tptr)
2007 lea 8*8($tptr),$tptr
2008
2009 cmp %rdx,$tptr # end of t[]?
2010 jb .L8x_reduction_loop
David Benjamin4969cc92016-04-22 15:02:23 -04002011 ret
2012.size bn_sqr8x_internal,.-bn_sqr8x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002013___
2014}
2015##############################################################
2016# Post-condition, 4x unrolled
2017#
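# In C-like terms the routine below amounts to a branchless, constant-time
# conditional subtraction of the modulus (a sketch with hypothetical names,
# not the literal register-level implementation):
#
#	mask = 0 - carry_top;			/* all-ones iff reduction left a carry */
#	borrow = 0;
#	for (i = 0; i < num; i++) {
#		uint64_t ni = n[i] & mask;	/* subtract n only in that case */
#		uint64_t di = t[i] - ni;
#		rp[i] = di - borrow;
#		borrow = (t[i] < ni) | (di < borrow);
#	}
#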
2018{
2019my ($tptr,$nptr)=("%rbx","%rbp");
2020$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002021.type __bn_post4x_internal,\@abi-omnipotent
Adam Langleyd9e397b2015-01-22 14:27:53 -08002022.align 32
David Benjamin4969cc92016-04-22 15:02:23 -04002023__bn_post4x_internal:
2024 mov 8*0($nptr),%r12
2025 lea (%rdi,$num),$tptr # %rdi was $tptr above
2026 mov $num,%rcx
2027 movq %xmm1,$rptr # restore $rptr
2028 neg %rax
2029 movq %xmm1,$aptr # prepare for back-to-back call
2030 sar \$3+2,%rcx
2031 dec %r12 # so that after 'not' we get -n[0]
2032 xor %r10,%r10
2033 mov 8*1($nptr),%r13
2034 mov 8*2($nptr),%r14
2035 mov 8*3($nptr),%r15
2036 jmp .Lsqr4x_sub_entry
2037
2038.align 16
Adam Langleyd9e397b2015-01-22 14:27:53 -08002039.Lsqr4x_sub:
David Benjamin4969cc92016-04-22 15:02:23 -04002040 mov 8*0($nptr),%r12
2041 mov 8*1($nptr),%r13
2042 mov 8*2($nptr),%r14
2043 mov 8*3($nptr),%r15
2044.Lsqr4x_sub_entry:
2045 lea 8*4($nptr),$nptr
2046 not %r12
2047 not %r13
2048 not %r14
2049 not %r15
2050 and %rax,%r12
2051 and %rax,%r13
2052 and %rax,%r14
2053 and %rax,%r15
2054
2055 neg %r10 # mov %r10,%cf
2056 adc 8*0($tptr),%r12
2057 adc 8*1($tptr),%r13
2058 adc 8*2($tptr),%r14
2059 adc 8*3($tptr),%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002060 mov %r12,8*0($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002061 lea 8*4($tptr),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002062 mov %r13,8*1($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002063 sbb %r10,%r10 # mov %cf,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002064 mov %r14,8*2($rptr)
2065 mov %r15,8*3($rptr)
2066 lea 8*4($rptr),$rptr
2067
2068 inc %rcx # pass %cf
2069 jnz .Lsqr4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04002070
Adam Langleyd9e397b2015-01-22 14:27:53 -08002071 mov $num,%r10 # prepare for back-to-back call
Robert Sloana94fe052017-02-21 08:49:28 -08002072 neg $num # restore $num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002073 ret
David Benjamin4969cc92016-04-22 15:02:23 -04002074.size __bn_post4x_internal,.-__bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002075___
David Benjamin4969cc92016-04-22 15:02:23 -04002076}
Adam Langleyd9e397b2015-01-22 14:27:53 -08002077{
2078$code.=<<___;
2079.globl bn_from_montgomery
2080.type bn_from_montgomery,\@abi-omnipotent
2081.align 32
2082bn_from_montgomery:
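	# Converts out of the Montgomery domain, i.e. effectively computes
	# r = a * R^-1 mod n by running one Montgomery reduction over a
	# zero-extended copy of a. Only lengths divisible by 8 are handled
	# here; otherwise 0 is returned, signalling the caller to fall back
	# to a generic path.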
2083 testl \$7,`($win64?"48(%rsp)":"%r9d")`
2084 jz bn_from_mont8x
2085 xor %eax,%eax
2086 ret
2087.size bn_from_montgomery,.-bn_from_montgomery
2088
2089.type bn_from_mont8x,\@function,6
2090.align 32
2091bn_from_mont8x:
Robert Sloana94fe052017-02-21 08:49:28 -08002092.cfi_startproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08002093 .byte 0x67
2094 mov %rsp,%rax
Robert Sloana94fe052017-02-21 08:49:28 -08002095.cfi_def_cfa_register %rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002096 push %rbx
Robert Sloana94fe052017-02-21 08:49:28 -08002097.cfi_push %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002098 push %rbp
Robert Sloana94fe052017-02-21 08:49:28 -08002099.cfi_push %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002100 push %r12
Robert Sloana94fe052017-02-21 08:49:28 -08002101.cfi_push %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002102 push %r13
Robert Sloana94fe052017-02-21 08:49:28 -08002103.cfi_push %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08002104 push %r14
Robert Sloana94fe052017-02-21 08:49:28 -08002105.cfi_push %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08002106 push %r15
Robert Sloana94fe052017-02-21 08:49:28 -08002107.cfi_push %r15
2108.Lfrom_prologue:
David Benjamin4969cc92016-04-22 15:02:23 -04002109
Adam Langleyd9e397b2015-01-22 14:27:53 -08002110 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002111 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002112 neg $num
2113 mov ($n0),$n0 # *n0
2114
2115 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002116 # Ensure that stack frame doesn't alias with $rptr+3*$num
2117 # modulo 4096, which covers ret[num], am[num] and n[num]
2118	# (see bn_exp.c). The stack is allocated so that it is aligned with
2119	# bn_power5's frame, and since bn_from_montgomery happens to be the
2120	# last operation, we use the opportunity to cleanse it.
Adam Langleyd9e397b2015-01-22 14:27:53 -08002121 #
David Benjamin4969cc92016-04-22 15:02:23 -04002122 lea -320(%rsp,$num,2),%r11
Robert Sloana94fe052017-02-21 08:49:28 -08002123 mov %rsp,%rbp
David Benjamin4969cc92016-04-22 15:02:23 -04002124 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002125 and \$4095,%r11
2126 cmp %r11,%r10
2127 jb .Lfrom_sp_alt
Robert Sloana94fe052017-02-21 08:49:28 -08002128 sub %r11,%rbp # align with $aptr
2129 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002130 jmp .Lfrom_sp_done
2131
2132.align 32
2133.Lfrom_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002134 lea 4096-320(,$num,2),%r10
Robert Sloana94fe052017-02-21 08:49:28 -08002135 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002136 sub %r10,%r11
2137 mov \$0,%r10
2138 cmovc %r10,%r11
Robert Sloana94fe052017-02-21 08:49:28 -08002139 sub %r11,%rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002140.Lfrom_sp_done:
Robert Sloana94fe052017-02-21 08:49:28 -08002141 and \$-64,%rbp
2142 mov %rsp,%r11
2143 sub %rbp,%r11
2144 and \$-4096,%r11
2145 lea (%rbp,%r11),%rsp
2146 mov (%rsp),%r10
2147 cmp %rbp,%rsp
2148 ja .Lfrom_page_walk
2149 jmp .Lfrom_page_walk_done
2150
2151.Lfrom_page_walk:
2152 lea -4096(%rsp),%rsp
2153 mov (%rsp),%r10
2154 cmp %rbp,%rsp
2155 ja .Lfrom_page_walk
2156.Lfrom_page_walk_done:
2157
2158 mov $num,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002159 neg $num
2160
2161 ##############################################################
2162 # Stack layout
2163 #
2164 # +0 saved $num, used in reduction section
2165 # +8 &t[2*$num], used in reduction section
2166 # +32 saved *n0
2167 # +40 saved %rsp
2168 # +48 t[2*$num]
2169 #
2170 mov $n0, 32(%rsp)
2171 mov %rax, 40(%rsp) # save original %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002172.cfi_cfa_expression %rsp+40,deref,+8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002173.Lfrom_body:
2174 mov $num,%r11
2175 lea 48(%rsp),%rax
2176 pxor %xmm0,%xmm0
2177 jmp .Lmul_by_1
2178
2179.align 32
2180.Lmul_by_1:
2181 movdqu ($aptr),%xmm1
2182 movdqu 16($aptr),%xmm2
2183 movdqu 32($aptr),%xmm3
2184 movdqa %xmm0,(%rax,$num)
2185 movdqu 48($aptr),%xmm4
2186 movdqa %xmm0,16(%rax,$num)
2187 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
2188 movdqa %xmm1,(%rax)
2189 movdqa %xmm0,32(%rax,$num)
2190 movdqa %xmm2,16(%rax)
2191 movdqa %xmm0,48(%rax,$num)
2192 movdqa %xmm3,32(%rax)
2193 movdqa %xmm4,48(%rax)
2194 lea 64(%rax),%rax
2195 sub \$64,%r11
2196 jnz .Lmul_by_1
2197
2198 movq $rptr,%xmm1
2199 movq $nptr,%xmm2
2200 .byte 0x67
2201 mov $nptr,%rbp
2202 movq %r10, %xmm3 # -num
2203___
2204$code.=<<___ if ($addx);
2205 mov OPENSSL_ia32cap_P+8(%rip),%r11d
David Benjamin4969cc92016-04-22 15:02:23 -04002206 and \$0x80108,%r11d
2207 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -08002208 jne .Lfrom_mont_nox
2209
2210 lea (%rax,$num),$rptr
David Benjamin4969cc92016-04-22 15:02:23 -04002211 call __bn_sqrx8x_reduction
2212 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002213
2214 pxor %xmm0,%xmm0
2215 lea 48(%rsp),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002216 jmp .Lfrom_mont_zero
2217
2218.align 32
2219.Lfrom_mont_nox:
2220___
2221$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002222 call __bn_sqr8x_reduction
2223 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002224
2225 pxor %xmm0,%xmm0
2226 lea 48(%rsp),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002227 jmp .Lfrom_mont_zero
2228
2229.align 32
2230.Lfrom_mont_zero:
Robert Sloana94fe052017-02-21 08:49:28 -08002231 mov 40(%rsp),%rsi # restore %rsp
2232.cfi_def_cfa %rsi,8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002233 movdqa %xmm0,16*0(%rax)
2234 movdqa %xmm0,16*1(%rax)
2235 movdqa %xmm0,16*2(%rax)
2236 movdqa %xmm0,16*3(%rax)
2237 lea 16*4(%rax),%rax
2238 sub \$32,$num
2239 jnz .Lfrom_mont_zero
2240
2241 mov \$1,%rax
2242 mov -48(%rsi),%r15
Robert Sloana94fe052017-02-21 08:49:28 -08002243.cfi_restore %r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002244 mov -40(%rsi),%r14
Robert Sloana94fe052017-02-21 08:49:28 -08002245.cfi_restore %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08002246 mov -32(%rsi),%r13
Robert Sloana94fe052017-02-21 08:49:28 -08002247.cfi_restore %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08002248 mov -24(%rsi),%r12
Robert Sloana94fe052017-02-21 08:49:28 -08002249.cfi_restore %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002250 mov -16(%rsi),%rbp
Robert Sloana94fe052017-02-21 08:49:28 -08002251.cfi_restore %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002252 mov -8(%rsi),%rbx
Robert Sloana94fe052017-02-21 08:49:28 -08002253.cfi_restore %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002254 lea (%rsi),%rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002255.cfi_def_cfa_register %rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002256.Lfrom_epilogue:
2257 ret
Robert Sloana94fe052017-02-21 08:49:28 -08002258.cfi_endproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08002259.size bn_from_mont8x,.-bn_from_mont8x
2260___
2261}
2262}}}
2263
2264if ($addx) {{{
2265my $bp="%rdx"; # restore original value
2266
2267$code.=<<___;
2268.type bn_mulx4x_mont_gather5,\@function,6
2269.align 32
2270bn_mulx4x_mont_gather5:
Robert Sloana94fe052017-02-21 08:49:28 -08002271.cfi_startproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08002272 mov %rsp,%rax
Robert Sloana94fe052017-02-21 08:49:28 -08002273.cfi_def_cfa_register %rax
2274.Lmulx4x_enter:
Adam Langleyd9e397b2015-01-22 14:27:53 -08002275 push %rbx
Robert Sloana94fe052017-02-21 08:49:28 -08002276.cfi_push %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002277 push %rbp
Robert Sloana94fe052017-02-21 08:49:28 -08002278.cfi_push %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002279 push %r12
Robert Sloana94fe052017-02-21 08:49:28 -08002280.cfi_push %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002281 push %r13
Robert Sloana94fe052017-02-21 08:49:28 -08002282.cfi_push %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08002283 push %r14
Robert Sloana94fe052017-02-21 08:49:28 -08002284.cfi_push %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08002285 push %r15
Robert Sloana94fe052017-02-21 08:49:28 -08002286.cfi_push %r15
2287.Lmulx4x_prologue:
David Benjamin4969cc92016-04-22 15:02:23 -04002288
Adam Langleyd9e397b2015-01-22 14:27:53 -08002289 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002290 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002291 neg $num # -$num
2292 mov ($n0),$n0 # *n0
2293
2294 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002295 # Ensure that stack frame doesn't alias with $rptr+3*$num
2296 # modulo 4096, which covers ret[num], am[num] and n[num]
2297	# (see bn_exp.c). This is done to allow the memory disambiguation
2298	# logic to do its magic. [An extra [num] is allocated in order
2299	# to align with bn_power5's frame, which is cleansed after
2300	# completing the exponentiation. An extra 256 bytes is for the power
2301	# mask calculated from the 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002302 #
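	# (Background: load/store disambiguation on modern x86 cores compares
	# only the low 12 address bits, so if this scratch frame lands at the
	# same offset modulo 4096 as ret/am/n the core may assume false
	# dependencies and stall; the placement tweak below avoids that.)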
David Benjamin4969cc92016-04-22 15:02:23 -04002303 lea -320(%rsp,$num,2),%r11
Robert Sloana94fe052017-02-21 08:49:28 -08002304 mov %rsp,%rbp
David Benjamin4969cc92016-04-22 15:02:23 -04002305 sub $rp,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002306 and \$4095,%r11
2307 cmp %r11,%r10
2308 jb .Lmulx4xsp_alt
Robert Sloana94fe052017-02-21 08:49:28 -08002309 sub %r11,%rbp # align with $aptr
2310 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002311 jmp .Lmulx4xsp_done
2312
Adam Langleyd9e397b2015-01-22 14:27:53 -08002313.Lmulx4xsp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002314 lea 4096-320(,$num,2),%r10
Robert Sloana94fe052017-02-21 08:49:28 -08002315 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002316 sub %r10,%r11
2317 mov \$0,%r10
2318 cmovc %r10,%r11
Robert Sloana94fe052017-02-21 08:49:28 -08002319 sub %r11,%rbp
2320.Lmulx4xsp_done:
2321 and \$-64,%rbp # ensure alignment
2322 mov %rsp,%r11
2323 sub %rbp,%r11
2324 and \$-4096,%r11
2325 lea (%rbp,%r11),%rsp
2326 mov (%rsp),%r10
2327 cmp %rbp,%rsp
2328 ja .Lmulx4x_page_walk
2329 jmp .Lmulx4x_page_walk_done
2330
2331.Lmulx4x_page_walk:
2332 lea -4096(%rsp),%rsp
2333 mov (%rsp),%r10
2334 cmp %rbp,%rsp
2335 ja .Lmulx4x_page_walk
2336.Lmulx4x_page_walk_done:
2337
Adam Langleyd9e397b2015-01-22 14:27:53 -08002338 ##############################################################
2339 # Stack layout
2340 # +0 -num
2341 # +8 off-loaded &b[i]
2342 # +16 end of b[num]
2343 # +24 inner counter
2344 # +32 saved n0
2345 # +40 saved %rsp
2346 # +48
2347 # +56 saved rp
2348 # +64 tmp[num+1]
2349 #
2350 mov $n0, 32(%rsp) # save *n0
2351 mov %rax,40(%rsp) # save original %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002352.cfi_cfa_expression %rsp+40,deref,+8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002353.Lmulx4x_body:
2354 call mulx4x_internal
2355
2356 mov 40(%rsp),%rsi # restore %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002357.cfi_def_cfa %rsi,8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002358 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04002359
Adam Langleyd9e397b2015-01-22 14:27:53 -08002360 mov -48(%rsi),%r15
Robert Sloana94fe052017-02-21 08:49:28 -08002361.cfi_restore %r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002362 mov -40(%rsi),%r14
Robert Sloana94fe052017-02-21 08:49:28 -08002363.cfi_restore %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08002364 mov -32(%rsi),%r13
Robert Sloana94fe052017-02-21 08:49:28 -08002365.cfi_restore %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08002366 mov -24(%rsi),%r12
Robert Sloana94fe052017-02-21 08:49:28 -08002367.cfi_restore %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002368 mov -16(%rsi),%rbp
Robert Sloana94fe052017-02-21 08:49:28 -08002369.cfi_restore %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002370 mov -8(%rsi),%rbx
Robert Sloana94fe052017-02-21 08:49:28 -08002371.cfi_restore %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002372 lea (%rsi),%rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002373.cfi_def_cfa_register %rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002374.Lmulx4x_epilogue:
2375 ret
Robert Sloana94fe052017-02-21 08:49:28 -08002376.cfi_endproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08002377.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2378
2379.type mulx4x_internal,\@abi-omnipotent
2380.align 32
2381mulx4x_internal:
David Benjamin4969cc92016-04-22 15:02:23 -04002382 mov $num,8(%rsp) # save -$num (it was in bytes)
2383 mov $num,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002384 neg $num # restore $num
2385 shl \$5,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002386 neg %r10 # restore $num
2387 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002388 shr \$5+5,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002389 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
Adam Langleyd9e397b2015-01-22 14:27:53 -08002390 sub \$1,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002391 lea .Linc(%rip),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002392 mov %r13,16+8(%rsp) # end of b[num]
2393 mov $num,24+8(%rsp) # inner counter
2394 mov $rp, 56+8(%rsp) # save $rp
2395___
2396my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2397 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2398my $rptr=$bptr;
2399my $STRIDE=2**5*8; # 5 is "window size"
2400my $N=$STRIDE/4; # should match cache line size
2401$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002402 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2403 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2404 lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimizaton)
2405 lea 128($bp),$bptr # size optimization
Adam Langleyd9e397b2015-01-22 14:27:53 -08002406
David Benjamin4969cc92016-04-22 15:02:23 -04002407 pshufd \$0,%xmm5,%xmm5 # broadcast index
2408 movdqa %xmm1,%xmm4
2409 .byte 0x67
2410 movdqa %xmm1,%xmm2
2411___
2412########################################################################
2413# calculate mask by comparing 0..31 to index and save result to stack
2414#
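# A C-style sketch of the constant-time gather set up below (hypothetical
# names, for illustration only). Rather than indexing the power table
# directly, which would leak the index through the data cache, every entry
# is touched and masked:
#
#	for (i = 0; i < 32; i++)
#		mask[i] = 0 - (uint64_t)(i == idx);	/* all-ones iff selected */
#	memset(b, 0, sizeof(b));
#	for (i = 0; i < 32; i++)
#		for (j = 0; j < limbs; j++)
#			b[j] |= table[i][j] & mask[i];	/* only entry idx survives */
#
# The code below builds the 32 masks with pcmpeqd against the broadcast
# index, stores them on the stack, and accumulates the selected entry with
# pand/por over the (interleaved) table.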
2415$code.=<<___;
2416 .byte 0x67
2417 paddd %xmm0,%xmm1
2418 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2419 movdqa %xmm4,%xmm3
2420___
2421for($i=0;$i<$STRIDE/16-4;$i+=4) {
2422$code.=<<___;
2423 paddd %xmm1,%xmm2
2424 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2425 movdqa %xmm0,`16*($i+0)+112`(%r10)
2426 movdqa %xmm4,%xmm0
2427
2428 paddd %xmm2,%xmm3
2429 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2430 movdqa %xmm1,`16*($i+1)+112`(%r10)
2431 movdqa %xmm4,%xmm1
2432
2433 paddd %xmm3,%xmm0
2434 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2435 movdqa %xmm2,`16*($i+2)+112`(%r10)
2436 movdqa %xmm4,%xmm2
2437
2438 paddd %xmm0,%xmm1
2439 pcmpeqd %xmm5,%xmm0
2440 movdqa %xmm3,`16*($i+3)+112`(%r10)
2441 movdqa %xmm4,%xmm3
2442___
2443}
2444$code.=<<___; # last iteration can be optimized
2445 .byte 0x67
2446 paddd %xmm1,%xmm2
2447 pcmpeqd %xmm5,%xmm1
2448 movdqa %xmm0,`16*($i+0)+112`(%r10)
2449
2450 paddd %xmm2,%xmm3
2451 pcmpeqd %xmm5,%xmm2
2452 movdqa %xmm1,`16*($i+1)+112`(%r10)
2453
2454 pcmpeqd %xmm5,%xmm3
2455 movdqa %xmm2,`16*($i+2)+112`(%r10)
2456
2457 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2458 pand `16*($i+1)-128`($bptr),%xmm1
2459 pand `16*($i+2)-128`($bptr),%xmm2
2460 movdqa %xmm3,`16*($i+3)+112`(%r10)
2461 pand `16*($i+3)-128`($bptr),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -08002462 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -04002463 por %xmm3,%xmm1
2464___
2465for($i=0;$i<$STRIDE/16-4;$i+=4) {
2466$code.=<<___;
2467 movdqa `16*($i+0)-128`($bptr),%xmm4
2468 movdqa `16*($i+1)-128`($bptr),%xmm5
2469 movdqa `16*($i+2)-128`($bptr),%xmm2
2470 pand `16*($i+0)+112`(%r10),%xmm4
2471 movdqa `16*($i+3)-128`($bptr),%xmm3
2472 pand `16*($i+1)+112`(%r10),%xmm5
2473 por %xmm4,%xmm0
2474 pand `16*($i+2)+112`(%r10),%xmm2
2475 por %xmm5,%xmm1
2476 pand `16*($i+3)+112`(%r10),%xmm3
2477 por %xmm2,%xmm0
2478 por %xmm3,%xmm1
2479___
2480}
2481$code.=<<___;
2482 pxor %xmm1,%xmm0
2483 pshufd \$0x4e,%xmm0,%xmm1
2484 por %xmm1,%xmm0
2485 lea $STRIDE($bptr),$bptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002486 movq %xmm0,%rdx # bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -04002487 lea 64+8*4+8(%rsp),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002488
2489 mov %rdx,$bi
2490 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2491 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2492 add %rax,%r11
2493 mulx 2*8($aptr),%rax,%r13 # ...
2494 adc %rax,%r12
2495 adc \$0,%r13
2496 mulx 3*8($aptr),%rax,%r14
2497
2498 mov $mi,%r15
2499 imulq 32+8(%rsp),$mi # "t[0]"*n0
2500 xor $zero,$zero # cf=0, of=0
2501 mov $mi,%rdx
2502
Adam Langleyd9e397b2015-01-22 14:27:53 -08002503 mov $bptr,8+8(%rsp) # off-load &b[i]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002504
David Benjamin4969cc92016-04-22 15:02:23 -04002505 lea 4*8($aptr),$aptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002506 adcx %rax,%r13
2507 adcx $zero,%r14 # cf=0
2508
David Benjamin4969cc92016-04-22 15:02:23 -04002509 mulx 0*8($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002510 adcx %rax,%r15 # discarded
2511 adox %r11,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002512 mulx 1*8($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002513 adcx %rax,%r10
2514 adox %r12,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002515 mulx 2*8($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002516 mov 24+8(%rsp),$bptr # counter value
Adam Langleyd9e397b2015-01-22 14:27:53 -08002517 mov %r10,-8*4($tptr)
2518 adcx %rax,%r11
2519 adox %r13,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002520 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002521 mov $bi,%rdx
2522 mov %r11,-8*3($tptr)
2523 adcx %rax,%r12
2524 adox $zero,%r15 # of=0
David Benjamin4969cc92016-04-22 15:02:23 -04002525 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002526 mov %r12,-8*2($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002527 jmp .Lmulx4x_1st
Adam Langleyd9e397b2015-01-22 14:27:53 -08002528
2529.align 32
2530.Lmulx4x_1st:
2531 adcx $zero,%r15 # cf=0, modulo-scheduled
2532 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2533 adcx %r14,%r10
2534 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2535 adcx %rax,%r11
2536 mulx 2*8($aptr),%r12,%rax # ...
2537 adcx %r14,%r12
2538 mulx 3*8($aptr),%r13,%r14
2539 .byte 0x67,0x67
2540 mov $mi,%rdx
2541 adcx %rax,%r13
2542 adcx $zero,%r14 # cf=0
2543 lea 4*8($aptr),$aptr
2544 lea 4*8($tptr),$tptr
2545
2546 adox %r15,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002547 mulx 0*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002548 adcx %rax,%r10
2549 adox %r15,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002550 mulx 1*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002551 adcx %rax,%r11
2552 adox %r15,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002553 mulx 2*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002554 mov %r10,-5*8($tptr)
2555 adcx %rax,%r12
2556 mov %r11,-4*8($tptr)
2557 adox %r15,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04002558 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002559 mov $bi,%rdx
2560 mov %r12,-3*8($tptr)
2561 adcx %rax,%r13
2562 adox $zero,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04002563 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002564 mov %r13,-2*8($tptr)
2565
2566 dec $bptr # of=0, pass cf
2567 jnz .Lmulx4x_1st
2568
2569 mov 8(%rsp),$num # load -num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002570 adc $zero,%r15 # modulo-scheduled
2571 lea ($aptr,$num),$aptr # rewind $aptr
2572 add %r15,%r14
2573 mov 8+8(%rsp),$bptr # re-load &b[i]
2574 adc $zero,$zero # top-most carry
2575 mov %r14,-1*8($tptr)
2576 jmp .Lmulx4x_outer
2577
2578.align 32
2579.Lmulx4x_outer:
David Benjamin4969cc92016-04-22 15:02:23 -04002580 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2581 pxor %xmm4,%xmm4
2582 .byte 0x67,0x67
2583 pxor %xmm5,%xmm5
2584___
2585for($i=0;$i<$STRIDE/16;$i+=4) {
2586$code.=<<___;
2587 movdqa `16*($i+0)-128`($bptr),%xmm0
2588 movdqa `16*($i+1)-128`($bptr),%xmm1
2589 movdqa `16*($i+2)-128`($bptr),%xmm2
2590 pand `16*($i+0)+256`(%r10),%xmm0
2591 movdqa `16*($i+3)-128`($bptr),%xmm3
2592 pand `16*($i+1)+256`(%r10),%xmm1
2593 por %xmm0,%xmm4
2594 pand `16*($i+2)+256`(%r10),%xmm2
2595 por %xmm1,%xmm5
2596 pand `16*($i+3)+256`(%r10),%xmm3
2597 por %xmm2,%xmm4
2598 por %xmm3,%xmm5
2599___
2600}
2601$code.=<<___;
2602 por %xmm5,%xmm4
2603 pshufd \$0x4e,%xmm4,%xmm0
2604 por %xmm4,%xmm0
2605 lea $STRIDE($bptr),$bptr
2606 movq %xmm0,%rdx # m0=bp[i]
2607
Adam Langleyd9e397b2015-01-22 14:27:53 -08002608 mov $zero,($tptr) # save top-most carry
2609 lea 4*8($tptr,$num),$tptr # rewind $tptr
2610 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2611 xor $zero,$zero # cf=0, of=0
2612 mov %rdx,$bi
2613 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2614 adox -4*8($tptr),$mi # +t[0]
2615 adcx %r14,%r11
2616 mulx 2*8($aptr),%r15,%r13 # ...
2617 adox -3*8($tptr),%r11
2618 adcx %r15,%r12
2619 mulx 3*8($aptr),%rdx,%r14
2620 adox -2*8($tptr),%r12
2621 adcx %rdx,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04002622 lea ($nptr,$num),$nptr # rewind $nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002623 lea 4*8($aptr),$aptr
2624 adox -1*8($tptr),%r13
2625 adcx $zero,%r14
2626 adox $zero,%r14
2627
Adam Langleyd9e397b2015-01-22 14:27:53 -08002628 mov $mi,%r15
2629 imulq 32+8(%rsp),$mi # "t[0]"*n0
2630
Adam Langleyd9e397b2015-01-22 14:27:53 -08002631 mov $mi,%rdx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002632 xor $zero,$zero # cf=0, of=0
2633 mov $bptr,8+8(%rsp) # off-load &b[i]
2634
David Benjamin4969cc92016-04-22 15:02:23 -04002635 mulx 0*8($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002636 adcx %rax,%r15 # discarded
2637 adox %r11,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002638 mulx 1*8($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002639 adcx %rax,%r10
2640 adox %r12,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002641 mulx 2*8($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002642 adcx %rax,%r11
2643 adox %r13,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002644 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002645 mov $bi,%rdx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002646 mov 24+8(%rsp),$bptr # counter value
2647 mov %r10,-8*4($tptr)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002648 adcx %rax,%r12
2649 mov %r11,-8*3($tptr)
2650 adox $zero,%r15 # of=0
2651 mov %r12,-8*2($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002652 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002653 jmp .Lmulx4x_inner
2654
2655.align 32
2656.Lmulx4x_inner:
2657 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2658 adcx $zero,%r15 # cf=0, modulo-scheduled
2659 adox %r14,%r10
2660 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2661 adcx 0*8($tptr),%r10
2662 adox %rax,%r11
2663 mulx 2*8($aptr),%r12,%rax # ...
2664 adcx 1*8($tptr),%r11
2665 adox %r14,%r12
2666 mulx 3*8($aptr),%r13,%r14
2667 mov $mi,%rdx
2668 adcx 2*8($tptr),%r12
2669 adox %rax,%r13
2670 adcx 3*8($tptr),%r13
2671 adox $zero,%r14 # of=0
2672 lea 4*8($aptr),$aptr
2673 lea 4*8($tptr),$tptr
2674 adcx $zero,%r14 # cf=0
2675
2676 adox %r15,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002677 mulx 0*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002678 adcx %rax,%r10
2679 adox %r15,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002680 mulx 1*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002681 adcx %rax,%r11
2682 adox %r15,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002683 mulx 2*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002684 mov %r10,-5*8($tptr)
2685 adcx %rax,%r12
2686 adox %r15,%r13
2687 mov %r11,-4*8($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002688 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002689 mov $bi,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -04002690 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002691 mov %r12,-3*8($tptr)
2692 adcx %rax,%r13
2693 adox $zero,%r15
2694 mov %r13,-2*8($tptr)
2695
2696 dec $bptr # of=0, pass cf
2697 jnz .Lmulx4x_inner
2698
2699 mov 0+8(%rsp),$num # load -num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002700 adc $zero,%r15 # modulo-scheduled
2701 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2702 mov 8+8(%rsp),$bptr # re-load &b[i]
2703 mov 16+8(%rsp),%r10
2704 adc %r15,%r14
2705 lea ($aptr,$num),$aptr # rewind $aptr
2706 adc $zero,$zero # top-most carry
2707 mov %r14,-1*8($tptr)
2708
2709 cmp %r10,$bptr
2710 jb .Lmulx4x_outer
2711
David Benjamin4969cc92016-04-22 15:02:23 -04002712 mov -8($nptr),%r10
2713 mov $zero,%r8
2714 mov ($nptr,$num),%r12
2715 lea ($nptr,$num),%rbp # rewind $nptr
2716 mov $num,%rcx
2717 lea ($tptr,$num),%rdi # rewind $tptr
2718 xor %eax,%eax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002719 xor %r15,%r15
2720 sub %r14,%r10 # compare top-most words
2721 adc %r15,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04002722 or %r15,%r8
2723 sar \$3+2,%rcx
2724 sub %r8,%rax # %rax=-%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002725 mov 56+8(%rsp),%rdx # restore rp
David Benjamin4969cc92016-04-22 15:02:23 -04002726 dec %r12 # so that after 'not' we get -n[0]
2727 mov 8*1(%rbp),%r13
2728 xor %r8,%r8
2729 mov 8*2(%rbp),%r14
2730 mov 8*3(%rbp),%r15
2731 jmp .Lsqrx4x_sub_entry # common post-condition
Adam Langleyd9e397b2015-01-22 14:27:53 -08002732.size mulx4x_internal,.-mulx4x_internal
2733___
2734} {
2735######################################################################
2736# void bn_power5(
2737my $rptr="%rdi"; # BN_ULONG *rptr,
2738my $aptr="%rsi"; # const BN_ULONG *aptr,
2739my $bptr="%rdx"; # const void *table,
2740my $nptr="%rcx"; # const BN_ULONG *nptr,
2741my $n0 ="%r8"; # const BN_ULONG *n0);
2742my $num ="%r9"; # int num, has to be divisible by 8
2743 # int pwr);
2744
2745my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2746my @A0=("%r10","%r11");
2747my @A1=("%r12","%r13");
2748my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2749
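# In bignum terms bn_powerx5 computes, entirely in the Montgomery domain,
# roughly r = a^(2^5) * table[pwr] mod n: five back-to-back squarings
# followed by one multiplication with the constant-time-gathered power.
# A loose sketch with hypothetical helpers:
#	for (i = 0; i < 5; i++) a = mont_sqr(a, n);
#	r = mont_mul(a, gather(table, pwr), n);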
2750$code.=<<___;
2751.type bn_powerx5,\@function,6
2752.align 32
2753bn_powerx5:
Robert Sloana94fe052017-02-21 08:49:28 -08002754.cfi_startproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08002755 mov %rsp,%rax
Robert Sloana94fe052017-02-21 08:49:28 -08002756.cfi_def_cfa_register %rax
2757.Lpowerx5_enter:
Adam Langleyd9e397b2015-01-22 14:27:53 -08002758 push %rbx
Robert Sloana94fe052017-02-21 08:49:28 -08002759.cfi_push %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002760 push %rbp
Robert Sloana94fe052017-02-21 08:49:28 -08002761.cfi_push %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002762 push %r12
Robert Sloana94fe052017-02-21 08:49:28 -08002763.cfi_push %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002764 push %r13
Robert Sloana94fe052017-02-21 08:49:28 -08002765.cfi_push %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08002766 push %r14
Robert Sloana94fe052017-02-21 08:49:28 -08002767.cfi_push %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08002768 push %r15
Robert Sloana94fe052017-02-21 08:49:28 -08002769.cfi_push %r15
2770.Lpowerx5_prologue:
David Benjamin4969cc92016-04-22 15:02:23 -04002771
Adam Langleyd9e397b2015-01-22 14:27:53 -08002772 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002773 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002774 neg $num
2775 mov ($n0),$n0 # *n0
2776
2777 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002778 # Ensure that stack frame doesn't alias with $rptr+3*$num
2779 # modulo 4096, which covers ret[num], am[num] and n[num]
2780	# (see bn_exp.c). This is done to allow the memory disambiguation
2781	# logic to do its magic. [An extra 256 bytes is for the power mask
2782	# calculated from the 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002783 #
David Benjamin4969cc92016-04-22 15:02:23 -04002784 lea -320(%rsp,$num,2),%r11
Robert Sloana94fe052017-02-21 08:49:28 -08002785 mov %rsp,%rbp
David Benjamin4969cc92016-04-22 15:02:23 -04002786 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002787 and \$4095,%r11
2788 cmp %r11,%r10
2789 jb .Lpwrx_sp_alt
Robert Sloana94fe052017-02-21 08:49:28 -08002790 sub %r11,%rbp # align with $aptr
2791 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002792 jmp .Lpwrx_sp_done
2793
2794.align 32
2795.Lpwrx_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002796 lea 4096-320(,$num,2),%r10
Robert Sloana94fe052017-02-21 08:49:28 -08002797 lea -320(%rbp,$num,2),%rbp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002798 sub %r10,%r11
2799 mov \$0,%r10
2800 cmovc %r10,%r11
Robert Sloana94fe052017-02-21 08:49:28 -08002801 sub %r11,%rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002802.Lpwrx_sp_done:
Robert Sloana94fe052017-02-21 08:49:28 -08002803 and \$-64,%rbp
2804 mov %rsp,%r11
2805 sub %rbp,%r11
2806 and \$-4096,%r11
2807 lea (%rbp,%r11),%rsp
2808 mov (%rsp),%r10
2809 cmp %rbp,%rsp
2810 ja .Lpwrx_page_walk
2811 jmp .Lpwrx_page_walk_done
2812
2813.Lpwrx_page_walk:
2814 lea -4096(%rsp),%rsp
2815 mov (%rsp),%r10
2816 cmp %rbp,%rsp
2817 ja .Lpwrx_page_walk
2818.Lpwrx_page_walk_done:
2819
2820 mov $num,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002821 neg $num
2822
2823 ##############################################################
2824 # Stack layout
2825 #
2826 # +0 saved $num, used in reduction section
2827 # +8 &t[2*$num], used in reduction section
2828 # +16 intermediate carry bit
2829 # +24 top-most carry bit, used in reduction section
2830 # +32 saved *n0
2831 # +40 saved %rsp
2832 # +48 t[2*$num]
2833 #
2834 pxor %xmm0,%xmm0
2835 movq $rptr,%xmm1 # save $rptr
2836 movq $nptr,%xmm2 # save $nptr
2837 movq %r10, %xmm3 # -$num
2838 movq $bptr,%xmm4
2839 mov $n0, 32(%rsp)
2840 mov %rax, 40(%rsp) # save original %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002841.cfi_cfa_expression %rsp+40,deref,+8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002842.Lpowerx5_body:
2843
2844 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002845 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002846 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002847 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002848 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002849 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002850 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002851 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002852 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002853 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002854
2855 mov %r10,$num # -num
2856 mov $aptr,$rptr
2857 movq %xmm2,$nptr
2858 movq %xmm4,$bptr
2859 mov 40(%rsp),%rax
2860
2861 call mulx4x_internal
2862
2863 mov 40(%rsp),%rsi # restore %rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002864.cfi_def_cfa %rsi,8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002865 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04002866
Adam Langleyd9e397b2015-01-22 14:27:53 -08002867 mov -48(%rsi),%r15
Robert Sloana94fe052017-02-21 08:49:28 -08002868.cfi_restore %r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002869 mov -40(%rsi),%r14
Robert Sloana94fe052017-02-21 08:49:28 -08002870.cfi_restore %r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08002871 mov -32(%rsi),%r13
Robert Sloana94fe052017-02-21 08:49:28 -08002872.cfi_restore %r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08002873 mov -24(%rsi),%r12
Robert Sloana94fe052017-02-21 08:49:28 -08002874.cfi_restore %r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002875 mov -16(%rsi),%rbp
Robert Sloana94fe052017-02-21 08:49:28 -08002876.cfi_restore %rbp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002877 mov -8(%rsi),%rbx
Robert Sloana94fe052017-02-21 08:49:28 -08002878.cfi_restore %rbx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002879 lea (%rsi),%rsp
Robert Sloana94fe052017-02-21 08:49:28 -08002880.cfi_def_cfa_register %rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08002881.Lpowerx5_epilogue:
2882 ret
Robert Sloana94fe052017-02-21 08:49:28 -08002883.cfi_endproc
Adam Langleyd9e397b2015-01-22 14:27:53 -08002884.size bn_powerx5,.-bn_powerx5
2885
2886.globl bn_sqrx8x_internal
2887.hidden bn_sqrx8x_internal
2888.type bn_sqrx8x_internal,\@abi-omnipotent
2889.align 32
2890bn_sqrx8x_internal:
2891__bn_sqrx8x_internal:
2892 ##################################################################
2893 # Squaring part:
2894 #
2895 # a) multiply-n-add everything but a[i]*a[i];
2896 # b) shift result of a) by 1 to the left and accumulate
2897 # a[i]*a[i] products;
2898 #
2899 ##################################################################
2900 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2901 # a[1]a[0]
2902 # a[2]a[0]
2903 # a[3]a[0]
2904 # a[2]a[1]
2905 # a[3]a[1]
2906 # a[3]a[2]
2907 #
2908 # a[4]a[0]
2909 # a[5]a[0]
2910 # a[6]a[0]
2911 # a[7]a[0]
2912 # a[4]a[1]
2913 # a[5]a[1]
2914 # a[6]a[1]
2915 # a[7]a[1]
2916 # a[4]a[2]
2917 # a[5]a[2]
2918 # a[6]a[2]
2919 # a[7]a[2]
2920 # a[4]a[3]
2921 # a[5]a[3]
2922 # a[6]a[3]
2923 # a[7]a[3]
2924 #
2925 # a[5]a[4]
2926 # a[6]a[4]
2927 # a[7]a[4]
2928 # a[6]a[5]
2929 # a[7]a[5]
2930 # a[7]a[6]
2931 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2932___
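# A loose C-style sketch of that schedule (hypothetical names; add_at(t,k,p)
# stands for "add the 128-bit product p into t at limb position k and
# propagate the carry"):
#
#	/* 1) every cross product a[i]*a[j], i < j, computed exactly once */
#	for (i = 0; i < num; i++)
#		for (j = i + 1; j < num; j++)
#			add_at(t, i + j, (unsigned __int128)a[i] * a[j]);
#	/* 2) double them and fold in the diagonal squares */
#	shift_left_by_1(t);
#	for (i = 0; i < num; i++)
#		add_at(t, 2 * i, (unsigned __int128)a[i] * a[i]);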
2933{
2934my ($zero,$carry)=("%rbp","%rcx");
2935my $aaptr=$zero;
2936$code.=<<___;
2937 lea 48+8(%rsp),$tptr
2938 lea ($aptr,$num),$aaptr
2939 mov $num,0+8(%rsp) # save $num
2940 mov $aaptr,8+8(%rsp) # save end of $aptr
2941 jmp .Lsqr8x_zero_start
2942
2943.align 32
2944.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2945.Lsqrx8x_zero:
2946 .byte 0x3e
2947 movdqa %xmm0,0*8($tptr)
2948 movdqa %xmm0,2*8($tptr)
2949 movdqa %xmm0,4*8($tptr)
2950 movdqa %xmm0,6*8($tptr)
2951.Lsqr8x_zero_start: # aligned at 32
2952 movdqa %xmm0,8*8($tptr)
2953 movdqa %xmm0,10*8($tptr)
2954 movdqa %xmm0,12*8($tptr)
2955 movdqa %xmm0,14*8($tptr)
2956 lea 16*8($tptr),$tptr
2957 sub \$64,$num
2958 jnz .Lsqrx8x_zero
2959
2960 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2961 #xor %r9,%r9 # t[1], ex-$num, zero already
2962 xor %r10,%r10
2963 xor %r11,%r11
2964 xor %r12,%r12
2965 xor %r13,%r13
2966 xor %r14,%r14
2967 xor %r15,%r15
2968 lea 48+8(%rsp),$tptr
2969	xor	$zero,$zero		# cf=0, of=0
2970 jmp .Lsqrx8x_outer_loop
2971
2972.align 32
2973.Lsqrx8x_outer_loop:
2974 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2975 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2976 adox %rax,%r10
2977 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2978 adcx %r10,%r9
2979 adox %rax,%r11
2980 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2981 adcx %r11,%r10
2982 adox %rax,%r12
2983 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2984 adcx %r12,%r11
2985 adox %rax,%r13
2986 mulx 5*8($aptr),%r12,%rax
2987 adcx %r13,%r12
2988 adox %rax,%r14
2989 mulx 6*8($aptr),%r13,%rax
2990 adcx %r14,%r13
2991 adox %r15,%rax
2992 mulx 7*8($aptr),%r14,%r15
2993 mov 1*8($aptr),%rdx # a[1]
2994 adcx %rax,%r14
2995 adox $zero,%r15
2996 adc 8*8($tptr),%r15
2997 mov %r8,1*8($tptr) # t[1]
2998 mov %r9,2*8($tptr) # t[2]
2999 sbb $carry,$carry # mov %cf,$carry
3000 xor $zero,$zero # cf=0, of=0
3001
3002
3003 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
3004 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
3005 adcx %r10,%r8
3006 adox %rbx,%r9
3007 mulx 4*8($aptr),%r10,%rbx # ...
3008 adcx %r11,%r9
3009 adox %rax,%r10
3010 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
3011 adcx %r12,%r10
3012 adox %rbx,%r11
3013 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
3014 adcx %r13,%r11
3015 adox %r14,%r12
3016 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
3017 mov 2*8($aptr),%rdx # a[2]
3018 adcx %rax,%r12
3019 adox %rbx,%r13
3020 adcx %r15,%r13
3021 adox $zero,%r14 # of=0
3022 adcx $zero,%r14 # cf=0
3023
3024 mov %r8,3*8($tptr) # t[3]
3025 mov %r9,4*8($tptr) # t[4]
3026
3027 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
3028 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
3029 adcx %r10,%r8
3030 adox %rbx,%r9
3031 mulx 5*8($aptr),%r10,%rbx # ...
3032 adcx %r11,%r9
3033 adox %rax,%r10
3034 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
3035 adcx %r12,%r10
3036 adox %r13,%r11
3037 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
3038 .byte 0x3e
3039 mov 3*8($aptr),%rdx # a[3]
3040 adcx %rbx,%r11
3041 adox %rax,%r12
3042 adcx %r14,%r12
3043 mov %r8,5*8($tptr) # t[5]
3044 mov %r9,6*8($tptr) # t[6]
3045 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
3046 adox $zero,%r13 # of=0
3047 adcx $zero,%r13 # cf=0
3048
3049 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
3050 adcx %r10,%r8
3051 adox %rax,%r9
3052 mulx 6*8($aptr),%r10,%rax # ...
3053 adcx %r11,%r9
3054 adox %r12,%r10
3055 mulx 7*8($aptr),%r11,%r12
3056 mov 4*8($aptr),%rdx # a[4]
3057 mov 5*8($aptr),%r14 # a[5]
3058 adcx %rbx,%r10
3059 adox %rax,%r11
3060 mov 6*8($aptr),%r15 # a[6]
3061 adcx %r13,%r11
3062 adox $zero,%r12 # of=0
3063 adcx $zero,%r12 # cf=0
3064
3065 mov %r8,7*8($tptr) # t[7]
3066 mov %r9,8*8($tptr) # t[8]
3067
3068 mulx %r14,%r9,%rax # a[5]*a[4]
3069 mov 7*8($aptr),%r8 # a[7]
3070 adcx %r10,%r9
3071 mulx %r15,%r10,%rbx # a[6]*a[4]
3072 adox %rax,%r10
3073 adcx %r11,%r10
3074 mulx %r8,%r11,%rax # a[7]*a[4]
3075 mov %r14,%rdx # a[5]
3076 adox %rbx,%r11
3077 adcx %r12,%r11
3078 #adox $zero,%rax # of=0
3079 adcx $zero,%rax # cf=0
3080
3081 mulx %r15,%r14,%rbx # a[6]*a[5]
3082 mulx %r8,%r12,%r13 # a[7]*a[5]
3083 mov %r15,%rdx # a[6]
3084 lea 8*8($aptr),$aptr
3085 adcx %r14,%r11
3086 adox %rbx,%r12
3087 adcx %rax,%r12
3088 adox $zero,%r13
3089
3090 .byte 0x67,0x67
3091 mulx %r8,%r8,%r14 # a[7]*a[6]
3092 adcx %r8,%r13
3093 adcx $zero,%r14
3094
3095 cmp 8+8(%rsp),$aptr
3096 je .Lsqrx8x_outer_break
3097
3098 neg $carry # mov $carry,%cf
3099 mov \$-8,%rcx
3100 mov $zero,%r15
3101 mov 8*8($tptr),%r8
3102 adcx 9*8($tptr),%r9 # +=t[9]
3103 adcx 10*8($tptr),%r10 # ...
3104 adcx 11*8($tptr),%r11
3105 adc 12*8($tptr),%r12
3106 adc 13*8($tptr),%r13
3107 adc 14*8($tptr),%r14
3108 adc 15*8($tptr),%r15
3109 lea ($aptr),$aaptr
3110 lea 2*64($tptr),$tptr
3111 sbb %rax,%rax # mov %cf,$carry
3112
3113 mov -64($aptr),%rdx # a[0]
3114 mov %rax,16+8(%rsp) # offload $carry
3115 mov $tptr,24+8(%rsp)
3116
3117 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
3118 xor %eax,%eax # cf=0, of=0
3119 jmp .Lsqrx8x_loop
3120
3121.align 32
3122.Lsqrx8x_loop:
3123 mov %r8,%rbx
3124 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
3125 adcx %rax,%rbx # +=t[8]
3126 adox %r9,%r8
3127
3128 mulx 1*8($aaptr),%rax,%r9 # ...
3129 adcx %rax,%r8
3130 adox %r10,%r9
3131
3132 mulx 2*8($aaptr),%rax,%r10
3133 adcx %rax,%r9
3134 adox %r11,%r10
3135
3136 mulx 3*8($aaptr),%rax,%r11
3137 adcx %rax,%r10
3138 adox %r12,%r11
3139
3140 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
3141 adcx %rax,%r11
3142 adox %r13,%r12
3143
3144 mulx 5*8($aaptr),%rax,%r13
3145 adcx %rax,%r12
3146 adox %r14,%r13
3147
3148 mulx 6*8($aaptr),%rax,%r14
3149 mov %rbx,($tptr,%rcx,8) # store t[8+i]
3150 mov \$0,%ebx
3151 adcx %rax,%r13
3152 adox %r15,%r14
3153
3154 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
3155 mov 8($aptr,%rcx,8),%rdx # a[i]
3156 adcx %rax,%r14
3157 adox %rbx,%r15 # %rbx is 0, of=0
3158 adcx %rbx,%r15 # cf=0
3159
3160 .byte 0x67
3161 inc %rcx # of=0
3162 jnz .Lsqrx8x_loop
3163
3164 lea 8*8($aaptr),$aaptr
3165 mov \$-8,%rcx
3166 cmp 8+8(%rsp),$aaptr # done?
3167 je .Lsqrx8x_break
3168
3169 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
3170 .byte 0x66
3171 mov -64($aptr),%rdx
3172 adcx 0*8($tptr),%r8
3173 adcx 1*8($tptr),%r9
3174 adc 2*8($tptr),%r10
3175 adc 3*8($tptr),%r11
3176 adc 4*8($tptr),%r12
3177 adc 5*8($tptr),%r13
3178 adc 6*8($tptr),%r14
3179 adc 7*8($tptr),%r15
3180 lea 8*8($tptr),$tptr
3181 .byte 0x67
3182 sbb %rax,%rax # mov %cf,%rax
3183 xor %ebx,%ebx # cf=0, of=0
3184 mov %rax,16+8(%rsp) # offload carry
3185 jmp .Lsqrx8x_loop
3186
3187.align 32
3188.Lsqrx8x_break:
3189 sub 16+8(%rsp),%r8 # consume last carry
3190 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
3191 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
3192 xor %ebp,%ebp # xor $zero,$zero
3193 mov %r8,0*8($tptr)
3194 cmp $carry,$tptr # cf=0, of=0
3195 je .Lsqrx8x_outer_loop
3196
3197 mov %r9,1*8($tptr)
3198 mov 1*8($carry),%r9
3199 mov %r10,2*8($tptr)
3200 mov 2*8($carry),%r10
3201 mov %r11,3*8($tptr)
3202 mov 3*8($carry),%r11
3203 mov %r12,4*8($tptr)
3204 mov 4*8($carry),%r12
3205 mov %r13,5*8($tptr)
3206 mov 5*8($carry),%r13
3207 mov %r14,6*8($tptr)
3208 mov 6*8($carry),%r14
3209 mov %r15,7*8($tptr)
3210 mov 7*8($carry),%r15
3211 mov $carry,$tptr
3212 jmp .Lsqrx8x_outer_loop
3213
3214.align 32
3215.Lsqrx8x_outer_break:
3216 mov %r9,9*8($tptr) # t[9]
3217 movq %xmm3,%rcx # -$num
3218 mov %r10,10*8($tptr) # ...
3219 mov %r11,11*8($tptr)
3220 mov %r12,12*8($tptr)
3221 mov %r13,13*8($tptr)
3222 mov %r14,14*8($tptr)
3223___
3224} {
3225my $i="%rcx";
3226$code.=<<___;
3227 lea 48+8(%rsp),$tptr
3228 mov ($aptr,$i),%rdx # a[0]
3229
3230 mov 8($tptr),$A0[1] # t[1]
3231 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3232 mov 0+8(%rsp),$num # restore $num
3233 adox $A0[1],$A0[1]
3234 mov 16($tptr),$A1[0] # t[2] # prefetch
3235 mov 24($tptr),$A1[1] # t[3] # prefetch
3236 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3237
3238.align 32
3239.Lsqrx4x_shift_n_add:
3240 mulx %rdx,%rax,%rbx
3241 adox $A1[0],$A1[0]
3242 adcx $A0[0],%rax
3243 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3244 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3245 adox $A1[1],$A1[1]
3246 adcx $A0[1],%rbx
3247 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3248 mov %rax,0($tptr)
3249 mov %rbx,8($tptr)
3250
3251 mulx %rdx,%rax,%rbx
3252 adox $A0[0],$A0[0]
3253 adcx $A1[0],%rax
3254 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3255 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3256 adox $A0[1],$A0[1]
3257 adcx $A1[1],%rbx
3258 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3259 mov %rax,16($tptr)
3260 mov %rbx,24($tptr)
3261
3262 mulx %rdx,%rax,%rbx
3263 adox $A1[0],$A1[0]
3264 adcx $A0[0],%rax
3265 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3266 lea 32($i),$i
3267 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3268 adox $A1[1],$A1[1]
3269 adcx $A0[1],%rbx
3270 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3271 mov %rax,32($tptr)
3272 mov %rbx,40($tptr)
3273
3274 mulx %rdx,%rax,%rbx
3275 adox $A0[0],$A0[0]
3276 adcx $A1[0],%rax
3277 jrcxz .Lsqrx4x_shift_n_add_break
3278 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3279 adox $A0[1],$A0[1]
3280 adcx $A1[1],%rbx
3281 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3282 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3283 mov %rax,48($tptr)
3284 mov %rbx,56($tptr)
3285 lea 64($tptr),$tptr
3286 nop
3287 jmp .Lsqrx4x_shift_n_add
3288
3289.align 32
3290.Lsqrx4x_shift_n_add_break:
3291 adcx $A1[1],%rbx
3292 mov %rax,48($tptr)
3293 mov %rbx,56($tptr)
3294 lea 64($tptr),$tptr # end of t[] buffer
3295___
3296}
3297######################################################################
3298# Montgomery reduction part, "word-by-word" algorithm.
3299#
3300# This new path is inspired by multiple submissions from Intel, by
3301# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3302# Vinodh Gopal...
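#
# What differs from the sqr8x path above is not the algorithm (it is the
# same word-by-word reduction) but the instruction mix: MULX yields a
# 64x64->128 product without touching the flags, while ADCX and ADOX
# update only CF and only OF respectively, so two independent carry chains
# can be interleaved, e.g. (taken from the loop below):
#
#	mulx	8*0($nptr),%rax,%r8	# product, flags untouched
#	adcx	%rbx,%rax		# chain 1, carried through CF
#	adox	%r9,%r8			# chain 2, carried through OF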
3303{
3304my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3305
3306$code.=<<___;
3307 movq %xmm2,$nptr
David Benjamin4969cc92016-04-22 15:02:23 -04003308__bn_sqrx8x_reduction:
Adam Langleyd9e397b2015-01-22 14:27:53 -08003309 xor %eax,%eax # initial top-most carry bit
3310 mov 32+8(%rsp),%rbx # n0
3311 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003312 lea -8*8($nptr,$num),%rcx # end of n[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003313 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3314 mov %rcx, 0+8(%rsp) # save end of n[]
3315 mov $tptr,8+8(%rsp) # save end of t[]
3316
3317 lea 48+8(%rsp),$tptr # initial t[] window
3318 jmp .Lsqrx8x_reduction_loop
3319
3320.align 32
3321.Lsqrx8x_reduction_loop:
3322 mov 8*1($tptr),%r9
3323 mov 8*2($tptr),%r10
3324 mov 8*3($tptr),%r11
3325 mov 8*4($tptr),%r12
3326 mov %rdx,%r8
3327 imulq %rbx,%rdx # n0*a[i]
3328 mov 8*5($tptr),%r13
3329 mov 8*6($tptr),%r14
3330 mov 8*7($tptr),%r15
3331 mov %rax,24+8(%rsp) # store top-most carry bit
3332
3333 lea 8*8($tptr),$tptr
3334 xor $carry,$carry # cf=0,of=0
3335 mov \$-8,%rcx
3336 jmp .Lsqrx8x_reduce
3337
3338.align 32
3339.Lsqrx8x_reduce:
3340 mov %r8, %rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003341 mulx 8*0($nptr),%rax,%r8 # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003342 adcx %rbx,%rax # discarded
3343 adox %r9,%r8
3344
David Benjamin4969cc92016-04-22 15:02:23 -04003345 mulx 8*1($nptr),%rbx,%r9 # n[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003346 adcx %rbx,%r8
3347 adox %r10,%r9
3348
David Benjamin4969cc92016-04-22 15:02:23 -04003349 mulx 8*2($nptr),%rbx,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08003350 adcx %rbx,%r9
3351 adox %r11,%r10
3352
David Benjamin4969cc92016-04-22 15:02:23 -04003353 mulx 8*3($nptr),%rbx,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08003354 adcx %rbx,%r10
3355 adox %r12,%r11
3356
David Benjamin4969cc92016-04-22 15:02:23 -04003357 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003358 mov %rdx,%rax
3359 mov %r8,%rdx
3360 adcx %rbx,%r11
3361 adox %r13,%r12
3362
3363 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3364 mov %rax,%rdx
3365 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3366
David Benjamin4969cc92016-04-22 15:02:23 -04003367 mulx 8*5($nptr),%rax,%r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08003368 adcx %rax,%r12
3369 adox %r14,%r13
3370
David Benjamin4969cc92016-04-22 15:02:23 -04003371 mulx 8*6($nptr),%rax,%r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08003372 adcx %rax,%r13
3373 adox %r15,%r14
3374
David Benjamin4969cc92016-04-22 15:02:23 -04003375 mulx 8*7($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003376 mov %rbx,%rdx
3377 adcx %rax,%r14
3378 adox $carry,%r15 # $carry is 0
3379 adcx $carry,%r15 # cf=0
3380
3381 .byte 0x67,0x67,0x67
3382 inc %rcx # of=0
3383 jnz .Lsqrx8x_reduce
3384
3385 mov $carry,%rax # xor %rax,%rax
3386 cmp 0+8(%rsp),$nptr # end of n[]?
3387 jae .Lsqrx8x_no_tail
3388
3389 mov 48+8(%rsp),%rdx # pull n0*a[0]
3390 add 8*0($tptr),%r8
David Benjamin4969cc92016-04-22 15:02:23 -04003391 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003392 mov \$-8,%rcx
3393 adcx 8*1($tptr),%r9
3394 adcx 8*2($tptr),%r10
3395 adc 8*3($tptr),%r11
3396 adc 8*4($tptr),%r12
3397 adc 8*5($tptr),%r13
3398 adc 8*6($tptr),%r14
3399 adc 8*7($tptr),%r15
3400 lea 8*8($tptr),$tptr
3401 sbb %rax,%rax # top carry
3402
3403 xor $carry,$carry # of=0, cf=0
3404 mov %rax,16+8(%rsp)
3405 jmp .Lsqrx8x_tail
3406
3407.align 32
3408.Lsqrx8x_tail:
3409 mov %r8,%rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003410 mulx 8*0($nptr),%rax,%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08003411 adcx %rax,%rbx
3412 adox %r9,%r8
3413
David Benjamin4969cc92016-04-22 15:02:23 -04003414 mulx 8*1($nptr),%rax,%r9
Adam Langleyd9e397b2015-01-22 14:27:53 -08003415 adcx %rax,%r8
3416 adox %r10,%r9
3417
David Benjamin4969cc92016-04-22 15:02:23 -04003418 mulx 8*2($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08003419 adcx %rax,%r9
3420 adox %r11,%r10
3421
David Benjamin4969cc92016-04-22 15:02:23 -04003422 mulx 8*3($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08003423 adcx %rax,%r10
3424 adox %r12,%r11
3425
David Benjamin4969cc92016-04-22 15:02:23 -04003426 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003427 adcx %rax,%r11
3428 adox %r13,%r12
3429
David Benjamin4969cc92016-04-22 15:02:23 -04003430 mulx 8*5($nptr),%rax,%r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08003431 adcx %rax,%r12
3432 adox %r14,%r13
3433
David Benjamin4969cc92016-04-22 15:02:23 -04003434 mulx 8*6($nptr),%rax,%r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08003435 adcx %rax,%r13
3436 adox %r15,%r14
3437
David Benjamin4969cc92016-04-22 15:02:23 -04003438 mulx 8*7($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003439 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3440 adcx %rax,%r14
3441 adox $carry,%r15
3442 mov %rbx,($tptr,%rcx,8) # save result
3443 mov %r8,%rbx
3444 adcx $carry,%r15 # cf=0
3445
3446 inc %rcx # of=0
3447 jnz .Lsqrx8x_tail
3448
3449 cmp 0+8(%rsp),$nptr # end of n[]?
3450 jae .Lsqrx8x_tail_done # break out of loop
3451
3452 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3453 mov 48+8(%rsp),%rdx # pull n0*a[0]
David Benjamin4969cc92016-04-22 15:02:23 -04003454 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003455 adc 8*0($tptr),%r8
3456 adc 8*1($tptr),%r9
3457 adc 8*2($tptr),%r10
3458 adc 8*3($tptr),%r11
3459 adc 8*4($tptr),%r12
3460 adc 8*5($tptr),%r13
3461 adc 8*6($tptr),%r14
3462 adc 8*7($tptr),%r15
3463 lea 8*8($tptr),$tptr
3464 sbb %rax,%rax
3465 sub \$8,%rcx # mov \$-8,%rcx
3466
3467 xor $carry,$carry # of=0, cf=0
3468 mov %rax,16+8(%rsp)
3469 jmp .Lsqrx8x_tail
3470
3471.align 32
3472.Lsqrx8x_tail_done:
Robert Sloan4d1ac502017-02-06 08:36:14 -08003473 xor %rax,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08003474	add	24+8(%rsp),%r8		# may carry out of %r8; absorbed into %rax below
Adam Langley4139edb2016-01-13 15:00:54 -08003475 adc \$0,%r9
3476 adc \$0,%r10
3477 adc \$0,%r11
3478 adc \$0,%r12
3479 adc \$0,%r13
3480 adc \$0,%r14
Robert Sloan4d1ac502017-02-06 08:36:14 -08003481 adc \$0,%r15
3482 adc \$0,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08003483
3484 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3485.Lsqrx8x_no_tail: # %cf is 0 if jumped here
3486 adc 8*0($tptr),%r8
3487 movq %xmm3,%rcx
3488 adc 8*1($tptr),%r9
David Benjamin4969cc92016-04-22 15:02:23 -04003489 mov 8*7($nptr),$carry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003490 movq %xmm2,$nptr # restore $nptr
3491 adc 8*2($tptr),%r10
3492 adc 8*3($tptr),%r11
3493 adc 8*4($tptr),%r12
3494 adc 8*5($tptr),%r13
3495 adc 8*6($tptr),%r14
3496 adc 8*7($tptr),%r15
Robert Sloan4d1ac502017-02-06 08:36:14 -08003497 adc \$0,%rax # top-most carry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003498
3499 mov 32+8(%rsp),%rbx # n0
3500 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3501
3502 mov %r8,8*0($tptr) # store top 512 bits
3503 lea 8*8($tptr),%r8 # borrow %r8
3504 mov %r9,8*1($tptr)
3505 mov %r10,8*2($tptr)
3506 mov %r11,8*3($tptr)
3507 mov %r12,8*4($tptr)
3508 mov %r13,8*5($tptr)
3509 mov %r14,8*6($tptr)
3510 mov %r15,8*7($tptr)
3511
3512 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3513 cmp 8+8(%rsp),%r8 # end of t[]?
3514 jb .Lsqrx8x_reduction_loop
David Benjamin4969cc92016-04-22 15:02:23 -04003515 ret
3516.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08003517___
3518}
3519##############################################################
3520# Post-condition, 4x unrolled
3521#
3522{
3523my ($rptr,$nptr)=("%rdx","%rbp");
Adam Langleyd9e397b2015-01-22 14:27:53 -08003524$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04003525.align 32
3526__bn_postx4x_internal:
3527 mov 8*0($nptr),%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003528 mov %rcx,%r10 # -$num
Adam Langleyd9e397b2015-01-22 14:27:53 -08003529 mov %rcx,%r9 # -$num
David Benjamin4969cc92016-04-22 15:02:23 -04003530 neg %rax
3531 sar \$3+2,%rcx
Adam Langleyd9e397b2015-01-22 14:27:53 -08003532 #lea 48+8(%rsp,%r9),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003533 movq %xmm1,$rptr # restore $rptr
3534 movq %xmm1,$aptr # prepare for back-to-back call
David Benjamin4969cc92016-04-22 15:02:23 -04003535	dec	%r12			# so that andn's complement below yields -n[0]
3536 mov 8*1($nptr),%r13
3537 xor %r8,%r8
3538 mov 8*2($nptr),%r14
3539 mov 8*3($nptr),%r15
3540 jmp .Lsqrx4x_sub_entry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003541
David Benjamin4969cc92016-04-22 15:02:23 -04003542.align 16
Adam Langleyd9e397b2015-01-22 14:27:53 -08003543.Lsqrx4x_sub:
David Benjamin4969cc92016-04-22 15:02:23 -04003544 mov 8*0($nptr),%r12
3545 mov 8*1($nptr),%r13
3546 mov 8*2($nptr),%r14
3547 mov 8*3($nptr),%r15
3548.Lsqrx4x_sub_entry:
3549 andn %rax,%r12,%r12
3550 lea 8*4($nptr),$nptr
3551 andn %rax,%r13,%r13
3552 andn %rax,%r14,%r14
3553 andn %rax,%r15,%r15
3554
3555 neg %r8 # mov %r8,%cf
3556 adc 8*0($tptr),%r12
3557 adc 8*1($tptr),%r13
3558 adc 8*2($tptr),%r14
3559 adc 8*3($tptr),%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003560 mov %r12,8*0($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003561 lea 8*4($tptr),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003562 mov %r13,8*1($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003563 sbb %r8,%r8 # mov %cf,%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08003564 mov %r14,8*2($rptr)
3565 mov %r15,8*3($rptr)
3566 lea 8*4($rptr),$rptr
3567
3568 inc %rcx
3569 jnz .Lsqrx4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04003570
Adam Langleyd9e397b2015-01-22 14:27:53 -08003571 neg %r9 # restore $num
3572
3573 ret
David Benjamin4969cc92016-04-22 15:02:23 -04003574.size __bn_postx4x_internal,.-__bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08003575___
David Benjamin4969cc92016-04-22 15:02:23 -04003576}
Adam Langleyd9e397b2015-01-22 14:27:53 -08003577}}}
3578{
3579my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3580 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3581my $out=$inp;
3582my $STRIDE=2**5*8;
3583my $N=$STRIDE/4;
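# Layout assumed by the two routines below (a sketch of the addressing,
# not literal code): the powers table holds 2^5 = 32 pre-computed powers,
# interleaved with an entry stride of $STRIDE = 2**5*8 = 256 bytes, so
#
#   word j of power i  lives at  tbl + j*$STRIDE + i*8    # i = 0..31
#
# bn_scatter5() writes one power into its column; bn_gather5() reads all
# 32 candidates for every word and selects one with SSE2 masks, so the
# addresses touched never depend on the secret index.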
3584
3585$code.=<<___;
3586.globl bn_scatter5
3587.type bn_scatter5,\@abi-omnipotent
3588.align 16
3589bn_scatter5:
3590 cmp \$0, $num
3591 jz .Lscatter_epilogue
3592 lea ($tbl,$idx,8),$tbl
3593.Lscatter:
3594 mov ($inp),%rax
3595 lea 8($inp),$inp
3596 mov %rax,($tbl)
3597 lea 32*8($tbl),$tbl
3598 sub \$1,$num
3599 jnz .Lscatter
3600.Lscatter_epilogue:
3601 ret
3602.size bn_scatter5,.-bn_scatter5
3603
3604.globl bn_gather5
3605.type bn_gather5,\@abi-omnipotent
David Benjamin4969cc92016-04-22 15:02:23 -04003606.align 32
Adam Langleyd9e397b2015-01-22 14:27:53 -08003607bn_gather5:
David Benjamin4969cc92016-04-22 15:02:23 -04003608.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
Adam Langleyd9e397b2015-01-22 14:27:53 -08003609 # I can't trust assembler to use specific encoding:-(
David Benjamin4969cc92016-04-22 15:02:23 -04003610 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3611 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3612 lea .Linc(%rip),%rax
3613 and \$-16,%rsp # shouldn't be formally required
3614
3615 movd $idx,%xmm5
3616 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3617 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3618 lea 128($tbl),%r11 # size optimization
3619 lea 128(%rsp),%rax # size optimization
3620
3621 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3622 movdqa %xmm1,%xmm4
3623 movdqa %xmm1,%xmm2
3624___
3625########################################################################
3626# calculate mask by comparing 0..31 to $idx and save result to stack
3627#
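# In scalar terms the generated code behaves roughly like this
# (illustrative pseudocode only):
#
#   for (my $k = 0; $k < 32; $k++) {
#       $mask[$k] = ($k == $idx) ? ~0 : 0;   # one 8-byte mask per table entry
#   }
#
# implemented four dword lanes at a time with paddd/pcmpeqd, so neither a
# branch nor a data-dependent address ever depends on $idx.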
3628for($i=0;$i<$STRIDE/16;$i+=4) {
3629$code.=<<___;
3630 paddd %xmm0,%xmm1
3631 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3632___
3633$code.=<<___ if ($i);
3634 movdqa %xmm3,`16*($i-1)-128`(%rax)
Adam Langleyd9e397b2015-01-22 14:27:53 -08003635___
3636$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04003637 movdqa %xmm4,%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -08003638
David Benjamin4969cc92016-04-22 15:02:23 -04003639 paddd %xmm1,%xmm2
3640 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3641 movdqa %xmm0,`16*($i+0)-128`(%rax)
3642 movdqa %xmm4,%xmm0
3643
3644 paddd %xmm2,%xmm3
3645 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3646 movdqa %xmm1,`16*($i+1)-128`(%rax)
3647 movdqa %xmm4,%xmm1
3648
3649 paddd %xmm3,%xmm0
3650 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3651 movdqa %xmm2,`16*($i+2)-128`(%rax)
3652 movdqa %xmm4,%xmm2
3653___
3654}
3655$code.=<<___;
3656 movdqa %xmm3,`16*($i-1)-128`(%rax)
3657 jmp .Lgather
3658
3659.align 32
3660.Lgather:
3661 pxor %xmm4,%xmm4
3662 pxor %xmm5,%xmm5
3663___
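# Each pass of .Lgather is, in effect (sketch only, under the layout
# described above):
#
#   my $acc = 0;
#   for my $k (0 .. 31) { $acc |= $table[$k] & $mask[$k]; }   # every entry read
#   # store the 64-bit $acc at $out, advance to the next $STRIDE-wide row
#
# Exactly one mask is all-ones, so $acc ends up holding the selected word
# while the load sequence stays identical for every $idx.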
3664for($i=0;$i<$STRIDE/16;$i+=4) {
3665$code.=<<___;
3666 movdqa `16*($i+0)-128`(%r11),%xmm0
3667 movdqa `16*($i+1)-128`(%r11),%xmm1
3668 movdqa `16*($i+2)-128`(%r11),%xmm2
3669 pand `16*($i+0)-128`(%rax),%xmm0
3670 movdqa `16*($i+3)-128`(%r11),%xmm3
3671 pand `16*($i+1)-128`(%rax),%xmm1
3672 por %xmm0,%xmm4
3673 pand `16*($i+2)-128`(%rax),%xmm2
3674 por %xmm1,%xmm5
3675 pand `16*($i+3)-128`(%rax),%xmm3
3676 por %xmm2,%xmm4
3677 por %xmm3,%xmm5
3678___
3679}
3680$code.=<<___;
3681 por %xmm5,%xmm4
3682 lea $STRIDE(%r11),%r11
3683 pshufd \$0x4e,%xmm4,%xmm0
3684 por %xmm4,%xmm0
Adam Langleyd9e397b2015-01-22 14:27:53 -08003685 movq %xmm0,($out) # m0=bp[0]
3686 lea 8($out),$out
3687 sub \$1,$num
3688 jnz .Lgather
David Benjamin4969cc92016-04-22 15:02:23 -04003689
3690 lea (%r10),%rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08003691 ret
3692.LSEH_end_bn_gather5:
3693.size bn_gather5,.-bn_gather5
3694___
3695}
3696$code.=<<___;
3697.align 64
David Benjamin4969cc92016-04-22 15:02:23 -04003698.Linc:
3699 .long 0,0, 1,1
3700 .long 2,2, 2,2
Adam Langleyd9e397b2015-01-22 14:27:53 -08003701.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3702___
3703
3704# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3705# CONTEXT *context,DISPATCHER_CONTEXT *disp)
3706if ($win64) {
3707$rec="%rcx";
3708$frame="%rdx";
3709$context="%r8";
3710$disp="%r9";
3711
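# mul_handler() below is the common Win64 unwind callback for all the
# SEH-covered routines.  Each .LSEH_info_* entry further down supplies
# three HandlerData[] RVAs, which this code interprets roughly as:
#
#   [0] end of prologue   - below this RIP nothing has been pushed yet
#   [1] start of body     - between [0] and [1] only registers are pushed
#   [2] start of epilogue - past this point the frame is already torn down
#
# From that it recovers the caller's %rsp and the saved non-volatile
# registers and copies them back into the CONTEXT record.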
3712$code.=<<___;
3713.extern __imp_RtlVirtualUnwind
3714.type mul_handler,\@abi-omnipotent
3715.align 16
3716mul_handler:
3717 push %rsi
3718 push %rdi
3719 push %rbx
3720 push %rbp
3721 push %r12
3722 push %r13
3723 push %r14
3724 push %r15
3725 pushfq
3726 sub \$64,%rsp
3727
3728 mov 120($context),%rax # pull context->Rax
3729 mov 248($context),%rbx # pull context->Rip
3730
3731 mov 8($disp),%rsi # disp->ImageBase
3732 mov 56($disp),%r11 # disp->HandlerData
3733
3734 mov 0(%r11),%r10d # HandlerData[0]
3735 lea (%rsi,%r10),%r10 # end of prologue label
3736 cmp %r10,%rbx # context->Rip<end of prologue label
3737 jb .Lcommon_seh_tail
3738
Robert Sloana94fe052017-02-21 08:49:28 -08003739 mov 4(%r11),%r10d # HandlerData[1]
3740 lea (%rsi,%r10),%r10 # beginning of body label
3741 cmp %r10,%rbx # context->Rip<body label
3742 jb .Lcommon_pop_regs
3743
Adam Langleyd9e397b2015-01-22 14:27:53 -08003744 mov 152($context),%rax # pull context->Rsp
3745
Robert Sloana94fe052017-02-21 08:49:28 -08003746 mov 8(%r11),%r10d # HandlerData[2]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003747 lea (%rsi,%r10),%r10 # epilogue label
3748 cmp %r10,%rbx # context->Rip>=epilogue label
3749 jae .Lcommon_seh_tail
3750
3751 lea .Lmul_epilogue(%rip),%r10
3752 cmp %r10,%rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003753 ja .Lbody_40
Adam Langleyd9e397b2015-01-22 14:27:53 -08003754
3755 mov 192($context),%r10 # pull $num
3756 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
David Benjamin4969cc92016-04-22 15:02:23 -04003757
Robert Sloana94fe052017-02-21 08:49:28 -08003758 jmp .Lcommon_pop_regs
Adam Langleyd9e397b2015-01-22 14:27:53 -08003759
3760.Lbody_40:
3761 mov 40(%rax),%rax # pull saved stack pointer
Robert Sloana94fe052017-02-21 08:49:28 -08003762.Lcommon_pop_regs:
Adam Langleyd9e397b2015-01-22 14:27:53 -08003763 mov -8(%rax),%rbx
3764 mov -16(%rax),%rbp
3765 mov -24(%rax),%r12
3766 mov -32(%rax),%r13
3767 mov -40(%rax),%r14
3768 mov -48(%rax),%r15
3769 mov %rbx,144($context) # restore context->Rbx
3770 mov %rbp,160($context) # restore context->Rbp
3771 mov %r12,216($context) # restore context->R12
3772 mov %r13,224($context) # restore context->R13
3773 mov %r14,232($context) # restore context->R14
3774 mov %r15,240($context) # restore context->R15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003775
3776.Lcommon_seh_tail:
3777 mov 8(%rax),%rdi
3778 mov 16(%rax),%rsi
3779 mov %rax,152($context) # restore context->Rsp
3780 mov %rsi,168($context) # restore context->Rsi
3781 mov %rdi,176($context) # restore context->Rdi
3782
3783 mov 40($disp),%rdi # disp->ContextRecord
3784 mov $context,%rsi # context
3785	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
3786 .long 0xa548f3fc # cld; rep movsq
3787
3788 mov $disp,%rsi
3789 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3790 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3791 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3792 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3793 mov 40(%rsi),%r10 # disp->ContextRecord
3794 lea 56(%rsi),%r11 # &disp->HandlerData
3795 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3796 mov %r10,32(%rsp) # arg5
3797 mov %r11,40(%rsp) # arg6
3798 mov %r12,48(%rsp) # arg7
3799 mov %rcx,56(%rsp) # arg8, (NULL)
3800 call *__imp_RtlVirtualUnwind(%rip)
3801
3802 mov \$1,%eax # ExceptionContinueSearch
3803 add \$64,%rsp
3804 popfq
3805 pop %r15
3806 pop %r14
3807 pop %r13
3808 pop %r12
3809 pop %rbp
3810 pop %rbx
3811 pop %rdi
3812 pop %rsi
3813 ret
3814.size mul_handler,.-mul_handler
3815
3816.section .pdata
3817.align 4
3818 .rva .LSEH_begin_bn_mul_mont_gather5
3819 .rva .LSEH_end_bn_mul_mont_gather5
3820 .rva .LSEH_info_bn_mul_mont_gather5
3821
3822 .rva .LSEH_begin_bn_mul4x_mont_gather5
3823 .rva .LSEH_end_bn_mul4x_mont_gather5
3824 .rva .LSEH_info_bn_mul4x_mont_gather5
3825
3826 .rva .LSEH_begin_bn_power5
3827 .rva .LSEH_end_bn_power5
3828 .rva .LSEH_info_bn_power5
3829
3830 .rva .LSEH_begin_bn_from_mont8x
3831 .rva .LSEH_end_bn_from_mont8x
3832 .rva .LSEH_info_bn_from_mont8x
3833___
3834$code.=<<___ if ($addx);
3835 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3836 .rva .LSEH_end_bn_mulx4x_mont_gather5
3837 .rva .LSEH_info_bn_mulx4x_mont_gather5
3838
3839 .rva .LSEH_begin_bn_powerx5
3840 .rva .LSEH_end_bn_powerx5
3841 .rva .LSEH_info_bn_powerx5
3842___
3843$code.=<<___;
3844 .rva .LSEH_begin_bn_gather5
3845 .rva .LSEH_end_bn_gather5
3846 .rva .LSEH_info_bn_gather5
3847
3848.section .xdata
3849.align 8
3850.LSEH_info_bn_mul_mont_gather5:
3851 .byte 9,0,0,0
3852 .rva mul_handler
Robert Sloana94fe052017-02-21 08:49:28 -08003853 .rva .Lmul_body,.Lmul_body,.Lmul_epilogue # HandlerData[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003854.align 8
3855.LSEH_info_bn_mul4x_mont_gather5:
3856 .byte 9,0,0,0
3857 .rva mul_handler
Robert Sloana94fe052017-02-21 08:49:28 -08003858 .rva .Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003859.align 8
3860.LSEH_info_bn_power5:
3861 .byte 9,0,0,0
3862 .rva mul_handler
Robert Sloana94fe052017-02-21 08:49:28 -08003863 .rva .Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue # HandlerData[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003864.align 8
3865.LSEH_info_bn_from_mont8x:
3866 .byte 9,0,0,0
3867 .rva mul_handler
Robert Sloana94fe052017-02-21 08:49:28 -08003868 .rva .Lfrom_prologue,.Lfrom_body,.Lfrom_epilogue # HandlerData[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003869___
3870$code.=<<___ if ($addx);
3871.align 8
3872.LSEH_info_bn_mulx4x_mont_gather5:
3873 .byte 9,0,0,0
3874 .rva mul_handler
Robert Sloana94fe052017-02-21 08:49:28 -08003875 .rva .Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003876.align 8
3877.LSEH_info_bn_powerx5:
3878 .byte 9,0,0,0
3879 .rva mul_handler
Robert Sloana94fe052017-02-21 08:49:28 -08003880 .rva .Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003881___
3882$code.=<<___;
3883.align 8
3884.LSEH_info_bn_gather5:
David Benjamin4969cc92016-04-22 15:02:23 -04003885 .byte 0x01,0x0b,0x03,0x0a
3886 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3887 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
Adam Langleyd9e397b2015-01-22 14:27:53 -08003888.align 8
3889___
3890}
3891
3892$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3893
3894print $code;
3895close STDOUT;