#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# August 2011.
#
# Companion to x86_64-mont.pl that optimizes cache-timing attack
# countermeasures. The subroutines are produced by replacing bp[i]
# references in their x86_64-mont.pl counterparts with cache-neutral
# references to a powers table computed in BN_mod_exp_mont_consttime.
# In addition, a subroutine that scatters elements of the powers table
# is implemented, so that scattering/gathering can be tuned without
# modifying bn_exp.c.

# August 2013.
#
# Add MULX/AD*X code paths and additional interfaces to optimize for
# the branch prediction unit. For input lengths that are multiples of 8,
# the np argument is not just the modulus value, but one interleaved
# with 0. This is to optimize the post-condition...

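# As a rough illustration only (not this module's exact data layout), the
# cache-neutral gather amounts to touching every one of the 2^5 table
# entries and masking out all but the wanted one, so the memory access
# pattern is independent of the secret index. A hypothetical C sketch:
#
#	/* select the j-th word of entry `idx' from a 32-entry table */
#	uint64_t gather_word(const uint64_t table[32][8], size_t j, size_t idx)
#	{
#		uint64_t acc = 0;
#		for (size_t k = 0; k < 32; k++) {
#			uint64_t mask = 0 - (uint64_t)(k == idx); /* all-ones or zero */
#			acc |= table[k][j] & mask;
#		}
#		return acc;
#	}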
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable this after testing. $addx goes up to 1.
$addx = 0;

# int bn_mul_mont_gather5(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num,
		# int idx);	# 0 to 2^5-1, "index" in $bp holding
				# pre-computed powers of a', interlaced
				# in such manner that b[0] is $bp[idx],
				# b[1] is [2^5+idx], etc.
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

$code=<<___;
.text

.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont_gather5
.type	bn_mul_mont_gather5,\@function,6
.align	64
bn_mul_mont_gather5:
	test	\$7,${num}d
	jnz	.Lmul_enter
___
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
___
$code.=<<___;
	jmp	.Lmul4x_enter

.align	16
.Lmul_enter:
	mov	${num}d,${num}d
	mov	%rsp,%rax
	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
	lea	.Linc(%rip),%r10
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15

	lea	2($num),%r11
	neg	%r11
	lea	-264(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2)+256+8)
	and	\$-1024,%rsp		# minimize TLB usage

	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp
.Lmul_body:
	lea	128($bp),%r12		# reassign $bp (+size optimization)
___
		$bp="%r12";
		$STRIDE=2**5*8;		# 5 is "window size"
		$N=$STRIDE/4;		# should match cache line size
$code.=<<___;
	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
	lea	24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
	and	\$-16,%r10

	pshufd	\$0,%xmm5,%xmm5		# broadcast index
	movdqa	%xmm1,%xmm4
	movdqa	%xmm1,%xmm2
___
########################################################################
# calculate mask by comparing 0..31 to index and save result to stack
#
$code.=<<___;
124 paddd %xmm0,%xmm1
125 pcmpeqd %xmm5,%xmm0 # compare to 1,0
126 .byte 0x67
127 movdqa %xmm4,%xmm3
128___
129for($k=0;$k<$STRIDE/16-4;$k+=4) {
130$code.=<<___;
131 paddd %xmm1,%xmm2
132 pcmpeqd %xmm5,%xmm1 # compare to 3,2
133 movdqa %xmm0,`16*($k+0)+112`(%r10)
134 movdqa %xmm4,%xmm0
135
136 paddd %xmm2,%xmm3
137 pcmpeqd %xmm5,%xmm2 # compare to 5,4
138 movdqa %xmm1,`16*($k+1)+112`(%r10)
139 movdqa %xmm4,%xmm1
140
141 paddd %xmm3,%xmm0
142 pcmpeqd %xmm5,%xmm3 # compare to 7,6
143 movdqa %xmm2,`16*($k+2)+112`(%r10)
144 movdqa %xmm4,%xmm2
145
146 paddd %xmm0,%xmm1
147 pcmpeqd %xmm5,%xmm0
148 movdqa %xmm3,`16*($k+3)+112`(%r10)
149 movdqa %xmm4,%xmm3
150___
151}
152$code.=<<___; # last iteration can be optimized
153 paddd %xmm1,%xmm2
154 pcmpeqd %xmm5,%xmm1
155 movdqa %xmm0,`16*($k+0)+112`(%r10)
156
157 paddd %xmm2,%xmm3
158 .byte 0x67
159 pcmpeqd %xmm5,%xmm2
160 movdqa %xmm1,`16*($k+1)+112`(%r10)
161
162 pcmpeqd %xmm5,%xmm3
163 movdqa %xmm2,`16*($k+2)+112`(%r10)
164 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
165
166 pand `16*($k+1)-128`($bp),%xmm1
167 pand `16*($k+2)-128`($bp),%xmm2
168 movdqa %xmm3,`16*($k+3)+112`(%r10)
169 pand `16*($k+3)-128`($bp),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -0800170 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -0400171 por %xmm3,%xmm1
172___
173for($k=0;$k<$STRIDE/16-4;$k+=4) {
174$code.=<<___;
175 movdqa `16*($k+0)-128`($bp),%xmm4
176 movdqa `16*($k+1)-128`($bp),%xmm5
177 movdqa `16*($k+2)-128`($bp),%xmm2
178 pand `16*($k+0)+112`(%r10),%xmm4
179 movdqa `16*($k+3)-128`($bp),%xmm3
180 pand `16*($k+1)+112`(%r10),%xmm5
181 por %xmm4,%xmm0
182 pand `16*($k+2)+112`(%r10),%xmm2
183 por %xmm5,%xmm1
184 pand `16*($k+3)+112`(%r10),%xmm3
185 por %xmm2,%xmm0
186 por %xmm3,%xmm1
187___
188}
189$code.=<<___;
190 por %xmm1,%xmm0
191 pshufd \$0x4e,%xmm0,%xmm1
192 por %xmm1,%xmm0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800193 lea $STRIDE($bp),$bp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800194 movq %xmm0,$m0 # m0=bp[0]
195
196 mov ($n0),$n0 # pull n0[0] value
197 mov ($ap),%rax
198
199 xor $i,$i # i=0
200 xor $j,$j # j=0
201
Adam Langleyd9e397b2015-01-22 14:27:53 -0800202 mov $n0,$m1
203 mulq $m0 # ap[0]*bp[0]
204 mov %rax,$lo0
205 mov ($np),%rax
206
Adam Langleyd9e397b2015-01-22 14:27:53 -0800207 imulq $lo0,$m1 # "tp[0]"*n0
208 mov %rdx,$hi0
209
Adam Langleyd9e397b2015-01-22 14:27:53 -0800210 mulq $m1 # np[0]*m1
211 add %rax,$lo0 # discarded
212 mov 8($ap),%rax
213 adc \$0,%rdx
214 mov %rdx,$hi1
215
216 lea 1($j),$j # j++
217 jmp .L1st_enter
218
219.align 16
220.L1st:
221 add %rax,$hi1
222 mov ($ap,$j,8),%rax
223 adc \$0,%rdx
224 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
225 mov $lo0,$hi0
226 adc \$0,%rdx
227 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
228 mov %rdx,$hi1
229
230.L1st_enter:
231 mulq $m0 # ap[j]*bp[0]
232 add %rax,$hi0
233 mov ($np,$j,8),%rax
234 adc \$0,%rdx
235 lea 1($j),$j # j++
236 mov %rdx,$lo0
237
238 mulq $m1 # np[j]*m1
239 cmp $num,$j
David Benjamin4969cc92016-04-22 15:02:23 -0400240 jne .L1st # note that upon exit $j==$num, so
241 # they can be used interchangeably
Adam Langleyd9e397b2015-01-22 14:27:53 -0800242
243 add %rax,$hi1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800244 adc \$0,%rdx
245 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
246 adc \$0,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -0400247 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800248 mov %rdx,$hi1
249 mov $lo0,$hi0
250
251 xor %rdx,%rdx
252 add $hi0,$hi1
253 adc \$0,%rdx
254 mov $hi1,-8(%rsp,$num,8)
255 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
256
257 lea 1($i),$i # i++
258 jmp .Louter
259.align 16
260.Louter:
David Benjamin4969cc92016-04-22 15:02:23 -0400261 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
262 and \$-16,%rdx
263 pxor %xmm4,%xmm4
264 pxor %xmm5,%xmm5
265___
266for($k=0;$k<$STRIDE/16;$k+=4) {
267$code.=<<___;
268 movdqa `16*($k+0)-128`($bp),%xmm0
269 movdqa `16*($k+1)-128`($bp),%xmm1
270 movdqa `16*($k+2)-128`($bp),%xmm2
271 movdqa `16*($k+3)-128`($bp),%xmm3
272 pand `16*($k+0)-128`(%rdx),%xmm0
273 pand `16*($k+1)-128`(%rdx),%xmm1
274 por %xmm0,%xmm4
275 pand `16*($k+2)-128`(%rdx),%xmm2
276 por %xmm1,%xmm5
277 pand `16*($k+3)-128`(%rdx),%xmm3
278 por %xmm2,%xmm4
279 por %xmm3,%xmm5
280___
281}
282$code.=<<___;
283 por %xmm5,%xmm4
284 pshufd \$0x4e,%xmm4,%xmm0
285 por %xmm4,%xmm0
286 lea $STRIDE($bp),$bp
287
288 mov ($ap),%rax # ap[0]
289 movq %xmm0,$m0 # m0=bp[i]
290
Adam Langleyd9e397b2015-01-22 14:27:53 -0800291 xor $j,$j # j=0
292 mov $n0,$m1
293 mov (%rsp),$lo0
294
Adam Langleyd9e397b2015-01-22 14:27:53 -0800295 mulq $m0 # ap[0]*bp[i]
296 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
297 mov ($np),%rax
298 adc \$0,%rdx
299
Adam Langleyd9e397b2015-01-22 14:27:53 -0800300 imulq $lo0,$m1 # tp[0]*n0
301 mov %rdx,$hi0
302
Adam Langleyd9e397b2015-01-22 14:27:53 -0800303 mulq $m1 # np[0]*m1
304 add %rax,$lo0 # discarded
305 mov 8($ap),%rax
306 adc \$0,%rdx
307 mov 8(%rsp),$lo0 # tp[1]
308 mov %rdx,$hi1
309
310 lea 1($j),$j # j++
311 jmp .Linner_enter
312
313.align 16
314.Linner:
315 add %rax,$hi1
316 mov ($ap,$j,8),%rax
317 adc \$0,%rdx
318 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
319 mov (%rsp,$j,8),$lo0
320 adc \$0,%rdx
321 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
322 mov %rdx,$hi1
323
324.Linner_enter:
325 mulq $m0 # ap[j]*bp[i]
326 add %rax,$hi0
327 mov ($np,$j,8),%rax
328 adc \$0,%rdx
329 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
330 mov %rdx,$hi0
331 adc \$0,$hi0
332 lea 1($j),$j # j++
333
334 mulq $m1 # np[j]*m1
335 cmp $num,$j
David Benjamin4969cc92016-04-22 15:02:23 -0400336 jne .Linner # note that upon exit $j==$num, so
337 # they can be used interchangeably
Adam Langleyd9e397b2015-01-22 14:27:53 -0800338 add %rax,$hi1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800339 adc \$0,%rdx
340 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
David Benjamin4969cc92016-04-22 15:02:23 -0400341 mov (%rsp,$num,8),$lo0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800342 adc \$0,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -0400343 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800344 mov %rdx,$hi1
345
346 xor %rdx,%rdx
347 add $hi0,$hi1
348 adc \$0,%rdx
349 add $lo0,$hi1 # pull upmost overflow bit
350 adc \$0,%rdx
351 mov $hi1,-8(%rsp,$num,8)
352 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
353
354 lea 1($i),$i # i++
355 cmp $num,$i
356 jb .Louter
357
358 xor $i,$i # i=0 and clear CF!
359 mov (%rsp),%rax # tp[0]
360 lea (%rsp),$ap # borrow ap for tp
361 mov $num,$j # j=num
362 jmp .Lsub
363.align 16
364.Lsub: sbb ($np,$i,8),%rax
365 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
366 mov 8($ap,$i,8),%rax # tp[i+1]
367 lea 1($i),$i # i++
	dec	$j			# doesn't affect CF!
369 jnz .Lsub
370
371 sbb \$0,%rax # handle upmost overflow bit
372 xor $i,$i
373 mov $num,$j # j=num
374.align 16
375.Lcopy: # copy or in-place refresh
376 mov (%rsp,$i,8),$ap
377 mov ($rp,$i,8),$np
378 xor $np,$ap # conditional select:
379 and %rax,$ap # ((ap ^ np) & %rax) ^ np
380 xor $np,$ap # ap = borrow?tp:rp
381 mov $i,(%rsp,$i,8) # zap temporary vector
382 mov $ap,($rp,$i,8) # rp[i]=tp[i]
383 lea 1($i),$i
384 sub \$1,$j
385 jnz .Lcopy
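# The xor/and/xor sequence in .Lcopy above is a branchless select: after
# .Lsub, %rax is either 0 or all-ones depending on the final borrow, so in
# illustrative C (not part of the generated code):
#
#	mask  = borrow ? ~(uint64_t)0 : 0;            /* %rax */
#	rp[i] = ((tp[i] ^ rp[i]) & mask) ^ rp[i];     /* borrow ? tp[i] : tp[i]-np[i] */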
386
387 mov 8(%rsp,$num,8),%rsi # restore %rsp
388 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -0400389
Adam Langleyd9e397b2015-01-22 14:27:53 -0800390 mov -48(%rsi),%r15
391 mov -40(%rsi),%r14
392 mov -32(%rsi),%r13
393 mov -24(%rsi),%r12
394 mov -16(%rsi),%rbp
395 mov -8(%rsi),%rbx
396 lea (%rsi),%rsp
397.Lmul_epilogue:
398 ret
399.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
400___
401{{{
402my @A=("%r10","%r11");
403my @N=("%r13","%rdi");
404$code.=<<___;
405.type bn_mul4x_mont_gather5,\@function,6
406.align 32
407bn_mul4x_mont_gather5:
408.Lmul4x_enter:
409___
410$code.=<<___ if ($addx);
David Benjamin4969cc92016-04-22 15:02:23 -0400411 and \$0x80108,%r11d
412 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800413 je .Lmulx4x_enter
414___
415$code.=<<___;
416 .byte 0x67
417 mov %rsp,%rax
418 push %rbx
419 push %rbp
420 push %r12
421 push %r13
422 push %r14
423 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -0400424
Adam Langleyd9e397b2015-01-22 14:27:53 -0800425 .byte 0x67
David Benjamin4969cc92016-04-22 15:02:23 -0400426 shl \$3,${num}d # convert $num to bytes
427 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -0800428 neg $num # -$num
429
	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to allow the memory disambiguation
	# logic to do its magic. [Extra [num] is allocated in order
	# to align with bn_power5's frame, which is cleansed after
	# completing exponentiation. An extra 256 bytes is for the power
	# mask calculated from the 7th argument, the index.]
	#
David Benjamin4969cc92016-04-22 15:02:23 -0400439 lea -320(%rsp,$num,2),%r11
440 sub $rp,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -0800441 and \$4095,%r11
442 cmp %r11,%r10
443 jb .Lmul4xsp_alt
David Benjamin4969cc92016-04-22 15:02:23 -0400444 sub %r11,%rsp # align with $rp
445 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800446 jmp .Lmul4xsp_done
447
448.align 32
449.Lmul4xsp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -0400450 lea 4096-320(,$num,2),%r10
451 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800452 sub %r10,%r11
453 mov \$0,%r10
454 cmovc %r10,%r11
455 sub %r11,%rsp
456.Lmul4xsp_done:
457 and \$-64,%rsp
458 neg $num
459
460 mov %rax,40(%rsp)
461.Lmul4x_body:
462
463 call mul4x_internal
464
465 mov 40(%rsp),%rsi # restore %rsp
466 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -0400467
Adam Langleyd9e397b2015-01-22 14:27:53 -0800468 mov -48(%rsi),%r15
469 mov -40(%rsi),%r14
470 mov -32(%rsi),%r13
471 mov -24(%rsi),%r12
472 mov -16(%rsi),%rbp
473 mov -8(%rsi),%rbx
474 lea (%rsi),%rsp
475.Lmul4x_epilogue:
476 ret
477.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
478
479.type mul4x_internal,\@abi-omnipotent
480.align 32
481mul4x_internal:
David Benjamin4969cc92016-04-22 15:02:23 -0400482 shl \$5,$num # $num was in bytes
483 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
484 lea .Linc(%rip),%rax
485 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
Adam Langleyd9e397b2015-01-22 14:27:53 -0800486 shr \$5,$num # restore $num
487___
488 $bp="%r12";
489 $STRIDE=2**5*8; # 5 is "window size"
490 $N=$STRIDE/4; # should match cache line size
491 $tp=$i;
492$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -0400493 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
494 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
495 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
496 lea 128(%rdx),$bp # size optimization
Adam Langleyd9e397b2015-01-22 14:27:53 -0800497
David Benjamin4969cc92016-04-22 15:02:23 -0400498 pshufd \$0,%xmm5,%xmm5 # broadcast index
499 movdqa %xmm1,%xmm4
500 .byte 0x67,0x67
501 movdqa %xmm1,%xmm2
502___
503########################################################################
504# calculate mask by comparing 0..31 to index and save result to stack
505#
506$code.=<<___;
507 paddd %xmm0,%xmm1
508 pcmpeqd %xmm5,%xmm0 # compare to 1,0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800509 .byte 0x67
David Benjamin4969cc92016-04-22 15:02:23 -0400510 movdqa %xmm4,%xmm3
511___
512for($i=0;$i<$STRIDE/16-4;$i+=4) {
513$code.=<<___;
514 paddd %xmm1,%xmm2
515 pcmpeqd %xmm5,%xmm1 # compare to 3,2
516 movdqa %xmm0,`16*($i+0)+112`(%r10)
517 movdqa %xmm4,%xmm0
518
519 paddd %xmm2,%xmm3
520 pcmpeqd %xmm5,%xmm2 # compare to 5,4
521 movdqa %xmm1,`16*($i+1)+112`(%r10)
522 movdqa %xmm4,%xmm1
523
524 paddd %xmm3,%xmm0
525 pcmpeqd %xmm5,%xmm3 # compare to 7,6
526 movdqa %xmm2,`16*($i+2)+112`(%r10)
527 movdqa %xmm4,%xmm2
528
529 paddd %xmm0,%xmm1
530 pcmpeqd %xmm5,%xmm0
531 movdqa %xmm3,`16*($i+3)+112`(%r10)
532 movdqa %xmm4,%xmm3
533___
534}
535$code.=<<___; # last iteration can be optimized
536 paddd %xmm1,%xmm2
537 pcmpeqd %xmm5,%xmm1
538 movdqa %xmm0,`16*($i+0)+112`(%r10)
539
540 paddd %xmm2,%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -0800541 .byte 0x67
David Benjamin4969cc92016-04-22 15:02:23 -0400542 pcmpeqd %xmm5,%xmm2
543 movdqa %xmm1,`16*($i+1)+112`(%r10)
544
545 pcmpeqd %xmm5,%xmm3
546 movdqa %xmm2,`16*($i+2)+112`(%r10)
547 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
548
549 pand `16*($i+1)-128`($bp),%xmm1
550 pand `16*($i+2)-128`($bp),%xmm2
551 movdqa %xmm3,`16*($i+3)+112`(%r10)
552 pand `16*($i+3)-128`($bp),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -0800553 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -0400554 por %xmm3,%xmm1
555___
556for($i=0;$i<$STRIDE/16-4;$i+=4) {
557$code.=<<___;
558 movdqa `16*($i+0)-128`($bp),%xmm4
559 movdqa `16*($i+1)-128`($bp),%xmm5
560 movdqa `16*($i+2)-128`($bp),%xmm2
561 pand `16*($i+0)+112`(%r10),%xmm4
562 movdqa `16*($i+3)-128`($bp),%xmm3
563 pand `16*($i+1)+112`(%r10),%xmm5
564 por %xmm4,%xmm0
565 pand `16*($i+2)+112`(%r10),%xmm2
566 por %xmm5,%xmm1
567 pand `16*($i+3)+112`(%r10),%xmm3
568 por %xmm2,%xmm0
569 por %xmm3,%xmm1
570___
571}
572$code.=<<___;
573 por %xmm1,%xmm0
574 pshufd \$0x4e,%xmm0,%xmm1
575 por %xmm1,%xmm0
576 lea $STRIDE($bp),$bp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800577 movq %xmm0,$m0 # m0=bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400578
Adam Langleyd9e397b2015-01-22 14:27:53 -0800579 mov %r13,16+8(%rsp) # save end of b[num]
580 mov $rp, 56+8(%rsp) # save $rp
581
582 mov ($n0),$n0 # pull n0[0] value
583 mov ($ap),%rax
584 lea ($ap,$num),$ap # end of a[num]
585 neg $num
586
587 mov $n0,$m1
588 mulq $m0 # ap[0]*bp[0]
589 mov %rax,$A[0]
590 mov ($np),%rax
591
Adam Langleyd9e397b2015-01-22 14:27:53 -0800592 imulq $A[0],$m1 # "tp[0]"*n0
David Benjamin4969cc92016-04-22 15:02:23 -0400593 lea 64+8(%rsp),$tp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800594 mov %rdx,$A[1]
595
Adam Langleyd9e397b2015-01-22 14:27:53 -0800596 mulq $m1 # np[0]*m1
597 add %rax,$A[0] # discarded
598 mov 8($ap,$num),%rax
599 adc \$0,%rdx
600 mov %rdx,$N[1]
601
602 mulq $m0
603 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400604 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800605 adc \$0,%rdx
606 mov %rdx,$A[0]
607
608 mulq $m1
609 add %rax,$N[1]
610 mov 16($ap,$num),%rax
611 adc \$0,%rdx
612 add $A[1],$N[1]
613 lea 4*8($num),$j # j=4
David Benjamin4969cc92016-04-22 15:02:23 -0400614 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800615 adc \$0,%rdx
616 mov $N[1],($tp)
617 mov %rdx,$N[0]
618 jmp .L1st4x
619
620.align 32
621.L1st4x:
622 mulq $m0 # ap[j]*bp[0]
623 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400624 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800625 lea 32($tp),$tp
626 adc \$0,%rdx
627 mov %rdx,$A[1]
628
629 mulq $m1 # np[j]*m1
630 add %rax,$N[0]
631 mov -8($ap,$j),%rax
632 adc \$0,%rdx
633 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
634 adc \$0,%rdx
635 mov $N[0],-24($tp) # tp[j-1]
636 mov %rdx,$N[1]
637
638 mulq $m0 # ap[j]*bp[0]
639 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400640 mov -8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800641 adc \$0,%rdx
642 mov %rdx,$A[0]
643
644 mulq $m1 # np[j]*m1
645 add %rax,$N[1]
646 mov ($ap,$j),%rax
647 adc \$0,%rdx
648 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
649 adc \$0,%rdx
650 mov $N[1],-16($tp) # tp[j-1]
651 mov %rdx,$N[0]
652
653 mulq $m0 # ap[j]*bp[0]
654 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400655 mov 8*0($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800656 adc \$0,%rdx
657 mov %rdx,$A[1]
658
659 mulq $m1 # np[j]*m1
660 add %rax,$N[0]
661 mov 8($ap,$j),%rax
662 adc \$0,%rdx
663 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
664 adc \$0,%rdx
665 mov $N[0],-8($tp) # tp[j-1]
666 mov %rdx,$N[1]
667
668 mulq $m0 # ap[j]*bp[0]
669 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400670 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800671 adc \$0,%rdx
672 mov %rdx,$A[0]
673
674 mulq $m1 # np[j]*m1
675 add %rax,$N[1]
676 mov 16($ap,$j),%rax
677 adc \$0,%rdx
678 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400679 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800680 adc \$0,%rdx
681 mov $N[1],($tp) # tp[j-1]
682 mov %rdx,$N[0]
683
684 add \$32,$j # j+=4
685 jnz .L1st4x
686
687 mulq $m0 # ap[j]*bp[0]
688 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400689 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800690 lea 32($tp),$tp
691 adc \$0,%rdx
692 mov %rdx,$A[1]
693
694 mulq $m1 # np[j]*m1
695 add %rax,$N[0]
696 mov -8($ap),%rax
697 adc \$0,%rdx
698 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
699 adc \$0,%rdx
700 mov $N[0],-24($tp) # tp[j-1]
701 mov %rdx,$N[1]
702
703 mulq $m0 # ap[j]*bp[0]
704 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400705 mov -8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800706 adc \$0,%rdx
707 mov %rdx,$A[0]
708
709 mulq $m1 # np[j]*m1
710 add %rax,$N[1]
711 mov ($ap,$num),%rax # ap[0]
712 adc \$0,%rdx
713 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
714 adc \$0,%rdx
715 mov $N[1],-16($tp) # tp[j-1]
716 mov %rdx,$N[0]
717
David Benjamin4969cc92016-04-22 15:02:23 -0400718 lea ($np,$num),$np # rewind $np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800719
720 xor $N[1],$N[1]
721 add $A[0],$N[0]
722 adc \$0,$N[1]
723 mov $N[0],-8($tp)
724
725 jmp .Louter4x
726
727.align 32
728.Louter4x:
David Benjamin4969cc92016-04-22 15:02:23 -0400729 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
730 pxor %xmm4,%xmm4
731 pxor %xmm5,%xmm5
732___
733for($i=0;$i<$STRIDE/16;$i+=4) {
734$code.=<<___;
735 movdqa `16*($i+0)-128`($bp),%xmm0
736 movdqa `16*($i+1)-128`($bp),%xmm1
737 movdqa `16*($i+2)-128`($bp),%xmm2
738 movdqa `16*($i+3)-128`($bp),%xmm3
739 pand `16*($i+0)-128`(%rdx),%xmm0
740 pand `16*($i+1)-128`(%rdx),%xmm1
741 por %xmm0,%xmm4
742 pand `16*($i+2)-128`(%rdx),%xmm2
743 por %xmm1,%xmm5
744 pand `16*($i+3)-128`(%rdx),%xmm3
745 por %xmm2,%xmm4
746 por %xmm3,%xmm5
747___
748}
749$code.=<<___;
750 por %xmm5,%xmm4
751 pshufd \$0x4e,%xmm4,%xmm0
752 por %xmm4,%xmm0
753 lea $STRIDE($bp),$bp
754 movq %xmm0,$m0 # m0=bp[i]
755
Adam Langleyd9e397b2015-01-22 14:27:53 -0800756 mov ($tp,$num),$A[0]
757 mov $n0,$m1
758 mulq $m0 # ap[0]*bp[i]
759 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
760 mov ($np),%rax
761 adc \$0,%rdx
762
Adam Langleyd9e397b2015-01-22 14:27:53 -0800763 imulq $A[0],$m1 # tp[0]*n0
Adam Langleyd9e397b2015-01-22 14:27:53 -0800764 mov %rdx,$A[1]
765 mov $N[1],($tp) # store upmost overflow bit
766
Adam Langleyd9e397b2015-01-22 14:27:53 -0800767 lea ($tp,$num),$tp # rewind $tp
Adam Langleyd9e397b2015-01-22 14:27:53 -0800768
769 mulq $m1 # np[0]*m1
770 add %rax,$A[0] # "$N[0]", discarded
771 mov 8($ap,$num),%rax
772 adc \$0,%rdx
773 mov %rdx,$N[1]
774
775 mulq $m0 # ap[j]*bp[i]
776 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400777 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800778 adc \$0,%rdx
779 add 8($tp),$A[1] # +tp[1]
780 adc \$0,%rdx
781 mov %rdx,$A[0]
782
783 mulq $m1 # np[j]*m1
784 add %rax,$N[1]
785 mov 16($ap,$num),%rax
786 adc \$0,%rdx
787 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
788 lea 4*8($num),$j # j=4
David Benjamin4969cc92016-04-22 15:02:23 -0400789 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800790 adc \$0,%rdx
791 mov %rdx,$N[0]
792 jmp .Linner4x
793
794.align 32
795.Linner4x:
796 mulq $m0 # ap[j]*bp[i]
797 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400798 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800799 adc \$0,%rdx
800 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
801 lea 32($tp),$tp
802 adc \$0,%rdx
803 mov %rdx,$A[1]
804
805 mulq $m1 # np[j]*m1
806 add %rax,$N[0]
807 mov -8($ap,$j),%rax
808 adc \$0,%rdx
809 add $A[0],$N[0]
810 adc \$0,%rdx
811 mov $N[1],-32($tp) # tp[j-1]
812 mov %rdx,$N[1]
813
814 mulq $m0 # ap[j]*bp[i]
815 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400816 mov -8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800817 adc \$0,%rdx
818 add -8($tp),$A[1]
819 adc \$0,%rdx
820 mov %rdx,$A[0]
821
822 mulq $m1 # np[j]*m1
823 add %rax,$N[1]
824 mov ($ap,$j),%rax
825 adc \$0,%rdx
826 add $A[1],$N[1]
827 adc \$0,%rdx
828 mov $N[0],-24($tp) # tp[j-1]
829 mov %rdx,$N[0]
830
831 mulq $m0 # ap[j]*bp[i]
832 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400833 mov 8*0($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800834 adc \$0,%rdx
835 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
836 adc \$0,%rdx
837 mov %rdx,$A[1]
838
839 mulq $m1 # np[j]*m1
840 add %rax,$N[0]
841 mov 8($ap,$j),%rax
842 adc \$0,%rdx
843 add $A[0],$N[0]
844 adc \$0,%rdx
845 mov $N[1],-16($tp) # tp[j-1]
846 mov %rdx,$N[1]
847
848 mulq $m0 # ap[j]*bp[i]
849 add %rax,$A[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400850 mov 8*1($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800851 adc \$0,%rdx
852 add 8($tp),$A[1]
853 adc \$0,%rdx
854 mov %rdx,$A[0]
855
856 mulq $m1 # np[j]*m1
857 add %rax,$N[1]
858 mov 16($ap,$j),%rax
859 adc \$0,%rdx
860 add $A[1],$N[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400861 lea 8*4($np),$np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800862 adc \$0,%rdx
863 mov $N[0],-8($tp) # tp[j-1]
864 mov %rdx,$N[0]
865
866 add \$32,$j # j+=4
867 jnz .Linner4x
868
869 mulq $m0 # ap[j]*bp[i]
870 add %rax,$A[0]
David Benjamin4969cc92016-04-22 15:02:23 -0400871 mov -8*2($np),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800872 adc \$0,%rdx
873 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
874 lea 32($tp),$tp
875 adc \$0,%rdx
876 mov %rdx,$A[1]
877
878 mulq $m1 # np[j]*m1
879 add %rax,$N[0]
880 mov -8($ap),%rax
881 adc \$0,%rdx
882 add $A[0],$N[0]
883 adc \$0,%rdx
884 mov $N[1],-32($tp) # tp[j-1]
885 mov %rdx,$N[1]
886
887 mulq $m0 # ap[j]*bp[i]
888 add %rax,$A[1]
889 mov $m1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -0400890 mov -8*1($np),$m1
Adam Langleyd9e397b2015-01-22 14:27:53 -0800891 adc \$0,%rdx
892 add -8($tp),$A[1]
893 adc \$0,%rdx
894 mov %rdx,$A[0]
895
896 mulq $m1 # np[j]*m1
897 add %rax,$N[1]
898 mov ($ap,$num),%rax # ap[0]
899 adc \$0,%rdx
900 add $A[1],$N[1]
901 adc \$0,%rdx
902 mov $N[0],-24($tp) # tp[j-1]
903 mov %rdx,$N[0]
904
Adam Langleyd9e397b2015-01-22 14:27:53 -0800905 mov $N[1],-16($tp) # tp[j-1]
David Benjamin4969cc92016-04-22 15:02:23 -0400906 lea ($np,$num),$np # rewind $np
Adam Langleyd9e397b2015-01-22 14:27:53 -0800907
908 xor $N[1],$N[1]
909 add $A[0],$N[0]
910 adc \$0,$N[1]
911 add ($tp),$N[0] # pull upmost overflow bit
912 adc \$0,$N[1] # upmost overflow bit
913 mov $N[0],-8($tp)
914
915 cmp 16+8(%rsp),$bp
916 jb .Louter4x
917___
918if (1) {
919$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -0400920 xor %rax,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -0800921 sub $N[0],$m1 # compare top-most words
922 adc $j,$j # $j is zero
923 or $j,$N[1]
David Benjamin4969cc92016-04-22 15:02:23 -0400924 sub $N[1],%rax # %rax=-$N[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -0800925 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -0400926 mov ($np),%r12
927 lea ($np),%rbp # nptr in .sqr4x_sub
Adam Langleyd9e397b2015-01-22 14:27:53 -0800928 mov %r9,%rcx
David Benjamin4969cc92016-04-22 15:02:23 -0400929 sar \$3+2,%rcx
Adam Langleyd9e397b2015-01-22 14:27:53 -0800930 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -0400931 dec %r12 # so that after 'not' we get -n[0]
932 xor %r10,%r10
933 mov 8*1(%rbp),%r13
934 mov 8*2(%rbp),%r14
935 mov 8*3(%rbp),%r15
936 jmp .Lsqr4x_sub_entry
Adam Langleyd9e397b2015-01-22 14:27:53 -0800937___
938} else {
939my @ri=("%rax",$bp,$m0,$m1);
940my $rp="%rdx";
941$code.=<<___
942 xor \$1,$N[1]
943 lea ($tp,$num),$tp # rewind $tp
944 sar \$5,$num # cf=0
945 lea ($np,$N[1],8),$np
946 mov 56+8(%rsp),$rp # restore $rp
947 jmp .Lsub4x
948
949.align 32
950.Lsub4x:
951 .byte 0x66
952 mov 8*0($tp),@ri[0]
953 mov 8*1($tp),@ri[1]
954 .byte 0x66
955 sbb 16*0($np),@ri[0]
956 mov 8*2($tp),@ri[2]
957 sbb 16*1($np),@ri[1]
958 mov 3*8($tp),@ri[3]
959 lea 4*8($tp),$tp
960 sbb 16*2($np),@ri[2]
961 mov @ri[0],8*0($rp)
962 sbb 16*3($np),@ri[3]
963 lea 16*4($np),$np
964 mov @ri[1],8*1($rp)
965 mov @ri[2],8*2($rp)
966 mov @ri[3],8*3($rp)
967 lea 8*4($rp),$rp
968
969 inc $num
970 jnz .Lsub4x
971
972 ret
973___
974}
975$code.=<<___;
976.size mul4x_internal,.-mul4x_internal
977___
978}}}
979 {{{
980######################################################################
981# void bn_power5(
982my $rptr="%rdi"; # BN_ULONG *rptr,
983my $aptr="%rsi"; # const BN_ULONG *aptr,
984my $bptr="%rdx"; # const void *table,
985my $nptr="%rcx"; # const BN_ULONG *nptr,
986my $n0 ="%r8"; # const BN_ULONG *n0);
987my $num ="%r9"; # int num, has to be divisible by 8
988 # int pwr
989
990my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
991my @A0=("%r10","%r11");
992my @A1=("%r12","%r13");
993my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
994
995$code.=<<___;
996.globl bn_power5
997.type bn_power5,\@function,6
998.align 32
999bn_power5:
1000___
1001$code.=<<___ if ($addx);
1002 mov OPENSSL_ia32cap_P+8(%rip),%r11d
David Benjamin4969cc92016-04-22 15:02:23 -04001003 and \$0x80108,%r11d
1004 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -08001005 je .Lpowerx5_enter
1006___
1007$code.=<<___;
1008 mov %rsp,%rax
1009 push %rbx
1010 push %rbp
1011 push %r12
1012 push %r13
1013 push %r14
1014 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04001015
Adam Langleyd9e397b2015-01-22 14:27:53 -08001016 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04001017 lea ($num,$num,2),%r10d # 3*$num
Adam Langleyd9e397b2015-01-22 14:27:53 -08001018 neg $num
1019 mov ($n0),$n0 # *n0
1020
	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to allow the memory disambiguation
	# logic to do its magic. [An extra 256 bytes is for the power
	# mask calculated from the 7th argument, the index.]
	#
David Benjamin4969cc92016-04-22 15:02:23 -04001028 lea -320(%rsp,$num,2),%r11
1029 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08001030 and \$4095,%r11
1031 cmp %r11,%r10
1032 jb .Lpwr_sp_alt
1033 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04001034 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08001035 jmp .Lpwr_sp_done
1036
1037.align 32
1038.Lpwr_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04001039 lea 4096-320(,$num,2),%r10
1040 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08001041 sub %r10,%r11
1042 mov \$0,%r10
1043 cmovc %r10,%r11
1044 sub %r11,%rsp
1045.Lpwr_sp_done:
1046 and \$-64,%rsp
1047 mov $num,%r10
1048 neg $num
1049
1050 ##############################################################
1051 # Stack layout
1052 #
1053 # +0 saved $num, used in reduction section
1054 # +8 &t[2*$num], used in reduction section
1055 # +32 saved *n0
1056 # +40 saved %rsp
1057 # +48 t[2*$num]
1058 #
1059 mov $n0, 32(%rsp)
1060 mov %rax, 40(%rsp) # save original %rsp
1061.Lpower5_body:
David Benjamin4969cc92016-04-22 15:02:23 -04001062 movq $rptr,%xmm1 # save $rptr, used in sqr8x
Adam Langleyd9e397b2015-01-22 14:27:53 -08001063 movq $nptr,%xmm2 # save $nptr
David Benjamin4969cc92016-04-22 15:02:23 -04001064 movq %r10, %xmm3 # -$num, used in sqr8x
Adam Langleyd9e397b2015-01-22 14:27:53 -08001065 movq $bptr,%xmm4
1066
1067 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001068 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001069 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001070 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001071 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001072 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001073 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001074 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001075 call __bn_sqr8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04001076 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001077
1078 movq %xmm2,$nptr
1079 movq %xmm4,$bptr
1080 mov $aptr,$rptr
1081 mov 40(%rsp),%rax
1082 lea 32(%rsp),$n0
1083
1084 call mul4x_internal
1085
1086 mov 40(%rsp),%rsi # restore %rsp
1087 mov \$1,%rax
1088 mov -48(%rsi),%r15
1089 mov -40(%rsi),%r14
1090 mov -32(%rsi),%r13
1091 mov -24(%rsi),%r12
1092 mov -16(%rsi),%rbp
1093 mov -8(%rsi),%rbx
1094 lea (%rsi),%rsp
1095.Lpower5_epilogue:
1096 ret
1097.size bn_power5,.-bn_power5
1098
1099.globl bn_sqr8x_internal
1100.hidden bn_sqr8x_internal
1101.type bn_sqr8x_internal,\@abi-omnipotent
1102.align 32
1103bn_sqr8x_internal:
1104__bn_sqr8x_internal:
	##############################################################
	# Squaring part:
	#
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	#
	##############################################################
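	# Put differently (illustrative identity, not extra code):
	#
	#   a^2 = 2 * sum_{i<j} a[i]*a[j] * 2^(64*(i+j)) + sum_i a[i]^2 * 2^(128*i)
	#
	# so each cross product is accumulated once, the partial result is
	# shifted left by one bit, and the diagonal a[i]*a[i] terms are then
	# folded in.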
1113 # a[1]a[0]
1114 # a[2]a[0]
1115 # a[3]a[0]
1116 # a[2]a[1]
1117 # a[4]a[0]
1118 # a[3]a[1]
1119 # a[5]a[0]
1120 # a[4]a[1]
1121 # a[3]a[2]
1122 # a[6]a[0]
1123 # a[5]a[1]
1124 # a[4]a[2]
1125 # a[7]a[0]
1126 # a[6]a[1]
1127 # a[5]a[2]
1128 # a[4]a[3]
1129 # a[7]a[1]
1130 # a[6]a[2]
1131 # a[5]a[3]
1132 # a[7]a[2]
1133 # a[6]a[3]
1134 # a[5]a[4]
1135 # a[7]a[3]
1136 # a[6]a[4]
1137 # a[7]a[4]
1138 # a[6]a[5]
1139 # a[7]a[5]
1140 # a[7]a[6]
1141 # a[1]a[0]
1142 # a[2]a[0]
1143 # a[3]a[0]
1144 # a[4]a[0]
1145 # a[5]a[0]
1146 # a[6]a[0]
1147 # a[7]a[0]
1148 # a[2]a[1]
1149 # a[3]a[1]
1150 # a[4]a[1]
1151 # a[5]a[1]
1152 # a[6]a[1]
1153 # a[7]a[1]
1154 # a[3]a[2]
1155 # a[4]a[2]
1156 # a[5]a[2]
1157 # a[6]a[2]
1158 # a[7]a[2]
1159 # a[4]a[3]
1160 # a[5]a[3]
1161 # a[6]a[3]
1162 # a[7]a[3]
1163 # a[5]a[4]
1164 # a[6]a[4]
1165 # a[7]a[4]
1166 # a[6]a[5]
1167 # a[7]a[5]
1168 # a[7]a[6]
1169 # a[0]a[0]
1170 # a[1]a[1]
1171 # a[2]a[2]
1172 # a[3]a[3]
1173 # a[4]a[4]
1174 # a[5]a[5]
1175 # a[6]a[6]
1176 # a[7]a[7]
1177
1178 lea 32(%r10),$i # $i=-($num-32)
1179 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1180
1181 mov $num,$j # $j=$num
1182
1183 # comments apply to $num==8 case
1184 mov -32($aptr,$i),$a0 # a[0]
1185 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1186 mov -24($aptr,$i),%rax # a[1]
1187 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1188 mov -16($aptr,$i),$ai # a[2]
1189 mov %rax,$a1
1190
1191 mul $a0 # a[1]*a[0]
1192 mov %rax,$A0[0] # a[1]*a[0]
1193 mov $ai,%rax # a[2]
1194 mov %rdx,$A0[1]
1195 mov $A0[0],-24($tptr,$i) # t[1]
1196
1197 mul $a0 # a[2]*a[0]
1198 add %rax,$A0[1]
1199 mov $ai,%rax
1200 adc \$0,%rdx
1201 mov $A0[1],-16($tptr,$i) # t[2]
1202 mov %rdx,$A0[0]
1203
1204
1205 mov -8($aptr,$i),$ai # a[3]
1206 mul $a1 # a[2]*a[1]
1207 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1208 mov $ai,%rax
1209 mov %rdx,$A1[1]
1210
1211 lea ($i),$j
1212 mul $a0 # a[3]*a[0]
1213 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1214 mov $ai,%rax
1215 mov %rdx,$A0[1]
1216 adc \$0,$A0[1]
1217 add $A1[0],$A0[0]
1218 adc \$0,$A0[1]
1219 mov $A0[0],-8($tptr,$j) # t[3]
1220 jmp .Lsqr4x_1st
1221
1222.align 32
1223.Lsqr4x_1st:
1224 mov ($aptr,$j),$ai # a[4]
1225 mul $a1 # a[3]*a[1]
1226 add %rax,$A1[1] # a[3]*a[1]+t[4]
1227 mov $ai,%rax
1228 mov %rdx,$A1[0]
1229 adc \$0,$A1[0]
1230
1231 mul $a0 # a[4]*a[0]
1232 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1233 mov $ai,%rax # a[3]
1234 mov 8($aptr,$j),$ai # a[5]
1235 mov %rdx,$A0[0]
1236 adc \$0,$A0[0]
1237 add $A1[1],$A0[1]
1238 adc \$0,$A0[0]
1239
1240
1241 mul $a1 # a[4]*a[3]
1242 add %rax,$A1[0] # a[4]*a[3]+t[5]
1243 mov $ai,%rax
1244 mov $A0[1],($tptr,$j) # t[4]
1245 mov %rdx,$A1[1]
1246 adc \$0,$A1[1]
1247
1248 mul $a0 # a[5]*a[2]
1249 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1250 mov $ai,%rax
1251 mov 16($aptr,$j),$ai # a[6]
1252 mov %rdx,$A0[1]
1253 adc \$0,$A0[1]
1254 add $A1[0],$A0[0]
1255 adc \$0,$A0[1]
1256
1257 mul $a1 # a[5]*a[3]
1258 add %rax,$A1[1] # a[5]*a[3]+t[6]
1259 mov $ai,%rax
1260 mov $A0[0],8($tptr,$j) # t[5]
1261 mov %rdx,$A1[0]
1262 adc \$0,$A1[0]
1263
1264 mul $a0 # a[6]*a[2]
1265 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1266 mov $ai,%rax # a[3]
1267 mov 24($aptr,$j),$ai # a[7]
1268 mov %rdx,$A0[0]
1269 adc \$0,$A0[0]
1270 add $A1[1],$A0[1]
1271 adc \$0,$A0[0]
1272
1273
1274 mul $a1 # a[6]*a[5]
1275 add %rax,$A1[0] # a[6]*a[5]+t[7]
1276 mov $ai,%rax
1277 mov $A0[1],16($tptr,$j) # t[6]
1278 mov %rdx,$A1[1]
1279 adc \$0,$A1[1]
1280 lea 32($j),$j
1281
1282 mul $a0 # a[7]*a[4]
1283 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
1284 mov $ai,%rax
1285 mov %rdx,$A0[1]
1286 adc \$0,$A0[1]
1287 add $A1[0],$A0[0]
1288 adc \$0,$A0[1]
1289 mov $A0[0],-8($tptr,$j) # t[7]
1290
1291 cmp \$0,$j
1292 jne .Lsqr4x_1st
1293
1294 mul $a1 # a[7]*a[5]
1295 add %rax,$A1[1]
1296 lea 16($i),$i
1297 adc \$0,%rdx
1298 add $A0[1],$A1[1]
1299 adc \$0,%rdx
1300
1301 mov $A1[1],($tptr) # t[8]
1302 mov %rdx,$A1[0]
1303 mov %rdx,8($tptr) # t[9]
1304 jmp .Lsqr4x_outer
1305
1306.align 32
1307.Lsqr4x_outer: # comments apply to $num==6 case
1308 mov -32($aptr,$i),$a0 # a[0]
1309 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1310 mov -24($aptr,$i),%rax # a[1]
1311 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1312 mov -16($aptr,$i),$ai # a[2]
1313 mov %rax,$a1
1314
1315 mul $a0 # a[1]*a[0]
1316 mov -24($tptr,$i),$A0[0] # t[1]
1317 add %rax,$A0[0] # a[1]*a[0]+t[1]
1318 mov $ai,%rax # a[2]
1319 adc \$0,%rdx
1320 mov $A0[0],-24($tptr,$i) # t[1]
1321 mov %rdx,$A0[1]
1322
1323 mul $a0 # a[2]*a[0]
1324 add %rax,$A0[1]
1325 mov $ai,%rax
1326 adc \$0,%rdx
1327 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1328 mov %rdx,$A0[0]
1329 adc \$0,$A0[0]
1330 mov $A0[1],-16($tptr,$i) # t[2]
1331
1332 xor $A1[0],$A1[0]
1333
1334 mov -8($aptr,$i),$ai # a[3]
1335 mul $a1 # a[2]*a[1]
1336 add %rax,$A1[0] # a[2]*a[1]+t[3]
1337 mov $ai,%rax
1338 adc \$0,%rdx
1339 add -8($tptr,$i),$A1[0]
1340 mov %rdx,$A1[1]
1341 adc \$0,$A1[1]
1342
1343 mul $a0 # a[3]*a[0]
1344 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1345 mov $ai,%rax
1346 adc \$0,%rdx
1347 add $A1[0],$A0[0]
1348 mov %rdx,$A0[1]
1349 adc \$0,$A0[1]
1350 mov $A0[0],-8($tptr,$i) # t[3]
1351
1352 lea ($i),$j
1353 jmp .Lsqr4x_inner
1354
1355.align 32
1356.Lsqr4x_inner:
1357 mov ($aptr,$j),$ai # a[4]
1358 mul $a1 # a[3]*a[1]
1359 add %rax,$A1[1] # a[3]*a[1]+t[4]
1360 mov $ai,%rax
1361 mov %rdx,$A1[0]
1362 adc \$0,$A1[0]
1363 add ($tptr,$j),$A1[1]
1364 adc \$0,$A1[0]
1365
1366 .byte 0x67
1367 mul $a0 # a[4]*a[0]
1368 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1369 mov $ai,%rax # a[3]
1370 mov 8($aptr,$j),$ai # a[5]
1371 mov %rdx,$A0[0]
1372 adc \$0,$A0[0]
1373 add $A1[1],$A0[1]
1374 adc \$0,$A0[0]
1375
1376 mul $a1 # a[4]*a[3]
1377 add %rax,$A1[0] # a[4]*a[3]+t[5]
1378 mov $A0[1],($tptr,$j) # t[4]
1379 mov $ai,%rax
1380 mov %rdx,$A1[1]
1381 adc \$0,$A1[1]
1382 add 8($tptr,$j),$A1[0]
1383 lea 16($j),$j # j++
1384 adc \$0,$A1[1]
1385
1386 mul $a0 # a[5]*a[2]
1387 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1388 mov $ai,%rax
1389 adc \$0,%rdx
1390 add $A1[0],$A0[0]
1391 mov %rdx,$A0[1]
1392 adc \$0,$A0[1]
1393 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1394
1395 cmp \$0,$j
1396 jne .Lsqr4x_inner
1397
1398 .byte 0x67
1399 mul $a1 # a[5]*a[3]
1400 add %rax,$A1[1]
1401 adc \$0,%rdx
1402 add $A0[1],$A1[1]
1403 adc \$0,%rdx
1404
1405 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1406 mov %rdx,$A1[0]
1407 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1408
1409 add \$16,$i
1410 jnz .Lsqr4x_outer
1411
1412 # comments apply to $num==4 case
1413 mov -32($aptr),$a0 # a[0]
1414 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1415 mov -24($aptr),%rax # a[1]
1416 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1417 mov -16($aptr),$ai # a[2]
1418 mov %rax,$a1
1419
1420 mul $a0 # a[1]*a[0]
1421 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1422 mov $ai,%rax # a[2]
1423 mov %rdx,$A0[1]
1424 adc \$0,$A0[1]
1425
1426 mul $a0 # a[2]*a[0]
1427 add %rax,$A0[1]
1428 mov $ai,%rax
1429 mov $A0[0],-24($tptr) # t[1]
1430 mov %rdx,$A0[0]
1431 adc \$0,$A0[0]
1432 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1433 mov -8($aptr),$ai # a[3]
1434 adc \$0,$A0[0]
1435
1436 mul $a1 # a[2]*a[1]
1437 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1438 mov $ai,%rax
1439 mov $A0[1],-16($tptr) # t[2]
1440 mov %rdx,$A1[1]
1441 adc \$0,$A1[1]
1442
1443 mul $a0 # a[3]*a[0]
1444 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1445 mov $ai,%rax
1446 mov %rdx,$A0[1]
1447 adc \$0,$A0[1]
1448 add $A1[0],$A0[0]
1449 adc \$0,$A0[1]
1450 mov $A0[0],-8($tptr) # t[3]
1451
1452 mul $a1 # a[3]*a[1]
1453 add %rax,$A1[1]
1454 mov -16($aptr),%rax # a[2]
1455 adc \$0,%rdx
1456 add $A0[1],$A1[1]
1457 adc \$0,%rdx
1458
1459 mov $A1[1],($tptr) # t[4]
1460 mov %rdx,$A1[0]
1461 mov %rdx,8($tptr) # t[5]
1462
1463 mul $ai # a[2]*a[3]
1464___
1465{
1466my ($shift,$carry)=($a0,$a1);
1467my @S=(@A1,$ai,$n0);
1468$code.=<<___;
1469 add \$16,$i
1470 xor $shift,$shift
1471 sub $num,$i # $i=16-$num
1472 xor $carry,$carry
1473
1474 add $A1[0],%rax # t[5]
1475 adc \$0,%rdx
1476 mov %rax,8($tptr) # t[5]
1477 mov %rdx,16($tptr) # t[6]
1478 mov $carry,24($tptr) # t[7]
1479
1480 mov -16($aptr,$i),%rax # a[0]
1481 lea 48+8(%rsp),$tptr
1482 xor $A0[0],$A0[0] # t[0]
1483 mov 8($tptr),$A0[1] # t[1]
1484
1485 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1486 shr \$63,$A0[0]
1487 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1488 shr \$63,$A0[1]
1489 or $A0[0],$S[1] # | t[2*i]>>63
1490 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1491 mov $A0[1],$shift # shift=t[2*i+1]>>63
1492 mul %rax # a[i]*a[i]
1493 neg $carry # mov $carry,cf
1494 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1495 adc %rax,$S[0]
1496 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1497 mov $S[0],($tptr)
1498 adc %rdx,$S[1]
1499
1500 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1501 mov $S[1],8($tptr)
1502 sbb $carry,$carry # mov cf,$carry
1503 shr \$63,$A0[0]
1504 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1505 shr \$63,$A0[1]
1506 or $A0[0],$S[3] # | t[2*i]>>63
1507 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1508 mov $A0[1],$shift # shift=t[2*i+1]>>63
1509 mul %rax # a[i]*a[i]
1510 neg $carry # mov $carry,cf
1511 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1512 adc %rax,$S[2]
1513 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1514 mov $S[2],16($tptr)
1515 adc %rdx,$S[3]
1516 lea 16($i),$i
1517 mov $S[3],24($tptr)
1518 sbb $carry,$carry # mov cf,$carry
1519 lea 64($tptr),$tptr
1520 jmp .Lsqr4x_shift_n_add
1521
1522.align 32
1523.Lsqr4x_shift_n_add:
1524 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1525 shr \$63,$A0[0]
1526 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1527 shr \$63,$A0[1]
1528 or $A0[0],$S[1] # | t[2*i]>>63
1529 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1530 mov $A0[1],$shift # shift=t[2*i+1]>>63
1531 mul %rax # a[i]*a[i]
1532 neg $carry # mov $carry,cf
1533 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1534 adc %rax,$S[0]
1535 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1536 mov $S[0],-32($tptr)
1537 adc %rdx,$S[1]
1538
1539 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1540 mov $S[1],-24($tptr)
1541 sbb $carry,$carry # mov cf,$carry
1542 shr \$63,$A0[0]
1543 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1544 shr \$63,$A0[1]
1545 or $A0[0],$S[3] # | t[2*i]>>63
1546 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1547 mov $A0[1],$shift # shift=t[2*i+1]>>63
1548 mul %rax # a[i]*a[i]
1549 neg $carry # mov $carry,cf
1550 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1551 adc %rax,$S[2]
1552 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1553 mov $S[2],-16($tptr)
1554 adc %rdx,$S[3]
1555
1556 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1557 mov $S[3],-8($tptr)
1558 sbb $carry,$carry # mov cf,$carry
1559 shr \$63,$A0[0]
1560 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1561 shr \$63,$A0[1]
1562 or $A0[0],$S[1] # | t[2*i]>>63
1563 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1564 mov $A0[1],$shift # shift=t[2*i+1]>>63
1565 mul %rax # a[i]*a[i]
1566 neg $carry # mov $carry,cf
1567 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1568 adc %rax,$S[0]
1569 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1570 mov $S[0],0($tptr)
1571 adc %rdx,$S[1]
1572
1573 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1574 mov $S[1],8($tptr)
1575 sbb $carry,$carry # mov cf,$carry
1576 shr \$63,$A0[0]
1577 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1578 shr \$63,$A0[1]
1579 or $A0[0],$S[3] # | t[2*i]>>63
1580 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1581 mov $A0[1],$shift # shift=t[2*i+1]>>63
1582 mul %rax # a[i]*a[i]
1583 neg $carry # mov $carry,cf
1584 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1585 adc %rax,$S[2]
1586 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1587 mov $S[2],16($tptr)
1588 adc %rdx,$S[3]
1589 mov $S[3],24($tptr)
1590 sbb $carry,$carry # mov cf,$carry
1591 lea 64($tptr),$tptr
1592 add \$32,$i
1593 jnz .Lsqr4x_shift_n_add
1594
1595 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1596 .byte 0x67
1597 shr \$63,$A0[0]
1598 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1599 shr \$63,$A0[1]
1600 or $A0[0],$S[1] # | t[2*i]>>63
1601 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1602 mov $A0[1],$shift # shift=t[2*i+1]>>63
1603 mul %rax # a[i]*a[i]
1604 neg $carry # mov $carry,cf
1605 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1606 adc %rax,$S[0]
1607 mov -8($aptr),%rax # a[i+1] # prefetch
1608 mov $S[0],-32($tptr)
1609 adc %rdx,$S[1]
1610
1611 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1612 mov $S[1],-24($tptr)
1613 sbb $carry,$carry # mov cf,$carry
1614 shr \$63,$A0[0]
1615 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1616 shr \$63,$A0[1]
1617 or $A0[0],$S[3] # | t[2*i]>>63
1618 mul %rax # a[i]*a[i]
1619 neg $carry # mov $carry,cf
1620 adc %rax,$S[2]
1621 adc %rdx,$S[3]
1622 mov $S[2],-16($tptr)
1623 mov $S[3],-8($tptr)
1624___
1625}
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
# Vinodh Gopal...
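#
# A rough, hedged C sketch of word-by-word Montgomery reduction (textbook
# shape only; the code below processes 8 words per round and interleaves the
# tail handling, and addmul64() is a hypothetical helper returning the low
# word of t + m*n[j] + carry while updating carry):
#
#	for (i = 0; i < num; i++) {
#		uint64_t m = t[i] * n0;		/* mod 2^64 */
#		uint64_t carry = 0;
#		for (j = 0; j < num; j++)
#			t[i+j] = addmul64(t[i+j], m, n[j], &carry);
#		t[i+num] += carry;		/* carry propagation, sketched */
#	}
#	/* result is t[num..2*num-1]; subtract n once if it is still >= n */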
1632{
1633my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1634
1635$code.=<<___;
1636 movq %xmm2,$nptr
David Benjamin4969cc92016-04-22 15:02:23 -04001637__bn_sqr8x_reduction:
Adam Langleyd9e397b2015-01-22 14:27:53 -08001638 xor %rax,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04001639 lea ($nptr,$num),%rcx # end of n[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001640 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1641 mov %rcx,0+8(%rsp)
1642 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1643 mov %rdx,8+8(%rsp)
1644 neg $num
1645 jmp .L8x_reduction_loop
1646
1647.align 32
1648.L8x_reduction_loop:
1649 lea ($tptr,$num),$tptr # start of current t[] window
1650 .byte 0x66
1651 mov 8*0($tptr),$m0
1652 mov 8*1($tptr),%r9
1653 mov 8*2($tptr),%r10
1654 mov 8*3($tptr),%r11
1655 mov 8*4($tptr),%r12
1656 mov 8*5($tptr),%r13
1657 mov 8*6($tptr),%r14
1658 mov 8*7($tptr),%r15
1659 mov %rax,(%rdx) # store top-most carry bit
1660 lea 8*8($tptr),$tptr
1661
1662 .byte 0x67
1663 mov $m0,%r8
1664 imulq 32+8(%rsp),$m0 # n0*a[0]
David Benjamin4969cc92016-04-22 15:02:23 -04001665 mov 8*0($nptr),%rax # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001666 mov \$8,%ecx
1667 jmp .L8x_reduce
1668
1669.align 32
1670.L8x_reduce:
1671 mulq $m0
David Benjamin4969cc92016-04-22 15:02:23 -04001672 mov 8*1($nptr),%rax # n[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001673 neg %r8
1674 mov %rdx,%r8
1675 adc \$0,%r8
1676
1677 mulq $m0
1678 add %rax,%r9
David Benjamin4969cc92016-04-22 15:02:23 -04001679 mov 8*2($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001680 adc \$0,%rdx
1681 add %r9,%r8
1682 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1683 mov %rdx,%r9
1684 adc \$0,%r9
1685
1686 mulq $m0
1687 add %rax,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04001688 mov 8*3($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001689 adc \$0,%rdx
1690 add %r10,%r9
1691 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1692 mov %rdx,%r10
1693 adc \$0,%r10
1694
1695 mulq $m0
1696 add %rax,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04001697 mov 8*4($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001698 adc \$0,%rdx
1699 imulq %r8,$carry # modulo-scheduled
1700 add %r11,%r10
1701 mov %rdx,%r11
1702 adc \$0,%r11
1703
1704 mulq $m0
1705 add %rax,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04001706 mov 8*5($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001707 adc \$0,%rdx
1708 add %r12,%r11
1709 mov %rdx,%r12
1710 adc \$0,%r12
1711
1712 mulq $m0
1713 add %rax,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04001714 mov 8*6($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001715 adc \$0,%rdx
1716 add %r13,%r12
1717 mov %rdx,%r13
1718 adc \$0,%r13
1719
1720 mulq $m0
1721 add %rax,%r14
David Benjamin4969cc92016-04-22 15:02:23 -04001722 mov 8*7($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001723 adc \$0,%rdx
1724 add %r14,%r13
1725 mov %rdx,%r14
1726 adc \$0,%r14
1727
1728 mulq $m0
1729 mov $carry,$m0 # n0*a[i]
1730 add %rax,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04001731 mov 8*0($nptr),%rax # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001732 adc \$0,%rdx
1733 add %r15,%r14
1734 mov %rdx,%r15
1735 adc \$0,%r15
1736
1737 dec %ecx
1738 jnz .L8x_reduce
1739
David Benjamin4969cc92016-04-22 15:02:23 -04001740 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08001741 xor %rax,%rax
1742 mov 8+8(%rsp),%rdx # pull end of t[]
1743 cmp 0+8(%rsp),$nptr # end of n[]?
1744 jae .L8x_no_tail
1745
1746 .byte 0x66
1747 add 8*0($tptr),%r8
1748 adc 8*1($tptr),%r9
1749 adc 8*2($tptr),%r10
1750 adc 8*3($tptr),%r11
1751 adc 8*4($tptr),%r12
1752 adc 8*5($tptr),%r13
1753 adc 8*6($tptr),%r14
1754 adc 8*7($tptr),%r15
1755 sbb $carry,$carry # top carry
1756
1757 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1758 mov \$8,%ecx
David Benjamin4969cc92016-04-22 15:02:23 -04001759 mov 8*0($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001760 jmp .L8x_tail
1761
1762.align 32
1763.L8x_tail:
1764 mulq $m0
1765 add %rax,%r8
David Benjamin4969cc92016-04-22 15:02:23 -04001766 mov 8*1($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001767 mov %r8,($tptr) # save result
1768 mov %rdx,%r8
1769 adc \$0,%r8
1770
1771 mulq $m0
1772 add %rax,%r9
David Benjamin4969cc92016-04-22 15:02:23 -04001773 mov 8*2($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001774 adc \$0,%rdx
1775 add %r9,%r8
1776 lea 8($tptr),$tptr # $tptr++
1777 mov %rdx,%r9
1778 adc \$0,%r9
1779
1780 mulq $m0
1781 add %rax,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04001782 mov 8*3($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001783 adc \$0,%rdx
1784 add %r10,%r9
1785 mov %rdx,%r10
1786 adc \$0,%r10
1787
1788 mulq $m0
1789 add %rax,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04001790 mov 8*4($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001791 adc \$0,%rdx
1792 add %r11,%r10
1793 mov %rdx,%r11
1794 adc \$0,%r11
1795
1796 mulq $m0
1797 add %rax,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04001798 mov 8*5($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001799 adc \$0,%rdx
1800 add %r12,%r11
1801 mov %rdx,%r12
1802 adc \$0,%r12
1803
1804 mulq $m0
1805 add %rax,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04001806 mov 8*6($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001807 adc \$0,%rdx
1808 add %r13,%r12
1809 mov %rdx,%r13
1810 adc \$0,%r13
1811
1812 mulq $m0
1813 add %rax,%r14
David Benjamin4969cc92016-04-22 15:02:23 -04001814 mov 8*7($nptr),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001815 adc \$0,%rdx
1816 add %r14,%r13
1817 mov %rdx,%r14
1818 adc \$0,%r14
1819
1820 mulq $m0
1821 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1822 add %rax,%r15
1823 adc \$0,%rdx
1824 add %r15,%r14
David Benjamin4969cc92016-04-22 15:02:23 -04001825 mov 8*0($nptr),%rax # pull n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001826 mov %rdx,%r15
1827 adc \$0,%r15
1828
1829 dec %ecx
1830 jnz .L8x_tail
1831
David Benjamin4969cc92016-04-22 15:02:23 -04001832 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08001833 mov 8+8(%rsp),%rdx # pull end of t[]
1834 cmp 0+8(%rsp),$nptr # end of n[]?
1835 jae .L8x_tail_done # break out of loop
1836
1837 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1838 neg $carry
1839 mov 8*0($nptr),%rax # pull n[0]
1840 adc 8*0($tptr),%r8
1841 adc 8*1($tptr),%r9
1842 adc 8*2($tptr),%r10
1843 adc 8*3($tptr),%r11
1844 adc 8*4($tptr),%r12
1845 adc 8*5($tptr),%r13
1846 adc 8*6($tptr),%r14
1847 adc 8*7($tptr),%r15
1848 sbb $carry,$carry # top carry
1849
1850 mov \$8,%ecx
1851 jmp .L8x_tail
1852
1853.align 32
1854.L8x_tail_done:
Robert Sloan4d1ac502017-02-06 08:36:14 -08001855 xor %rax,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001856 add (%rdx),%r8 # can this overflow?
Adam Langley4139edb2016-01-13 15:00:54 -08001857 adc \$0,%r9
1858 adc \$0,%r10
1859 adc \$0,%r11
1860 adc \$0,%r12
1861 adc \$0,%r13
1862 adc \$0,%r14
Robert Sloan4d1ac502017-02-06 08:36:14 -08001863 adc \$0,%r15
1864 adc \$0,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08001865
1866 neg $carry
1867.L8x_no_tail:
1868 adc 8*0($tptr),%r8
1869 adc 8*1($tptr),%r9
1870 adc 8*2($tptr),%r10
1871 adc 8*3($tptr),%r11
1872 adc 8*4($tptr),%r12
1873 adc 8*5($tptr),%r13
1874 adc 8*6($tptr),%r14
1875 adc 8*7($tptr),%r15
1876 adc \$0,%rax # top-most carry
David Benjamin4969cc92016-04-22 15:02:23 -04001877 mov -8($nptr),%rcx # np[num-1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08001878 xor $carry,$carry
1879
1880 movq %xmm2,$nptr # restore $nptr
1881
1882 mov %r8,8*0($tptr) # store top 512 bits
1883 mov %r9,8*1($tptr)
1884 movq %xmm3,$num # $num is %r9, can't be moved upwards
1885 mov %r10,8*2($tptr)
1886 mov %r11,8*3($tptr)
1887 mov %r12,8*4($tptr)
1888 mov %r13,8*5($tptr)
1889 mov %r14,8*6($tptr)
1890 mov %r15,8*7($tptr)
1891 lea 8*8($tptr),$tptr
1892
1893 cmp %rdx,$tptr # end of t[]?
1894 jb .L8x_reduction_loop
David Benjamin4969cc92016-04-22 15:02:23 -04001895 ret
1896.size bn_sqr8x_internal,.-bn_sqr8x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001897___
1898}
1899##############################################################
1900# Post-condition, 4x unrolled
1901#
1902{
1903my ($tptr,$nptr)=("%rbx","%rbp");
1904$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04001905.type __bn_post4x_internal,\@abi-omnipotent
Adam Langleyd9e397b2015-01-22 14:27:53 -08001906.align 32
David Benjamin4969cc92016-04-22 15:02:23 -04001907__bn_post4x_internal:
1908 mov 8*0($nptr),%r12
1909 lea (%rdi,$num),$tptr # %rdi was $tptr above
1910 mov $num,%rcx
1911 movq %xmm1,$rptr # restore $rptr
1912 neg %rax
1913 movq %xmm1,$aptr # prepare for back-to-back call
1914 sar \$3+2,%rcx
1915 dec %r12 # so that after 'not' we get -n[0]
1916 xor %r10,%r10
1917 mov 8*1($nptr),%r13
1918 mov 8*2($nptr),%r14
1919 mov 8*3($nptr),%r15
1920 jmp .Lsqr4x_sub_entry
1921
1922.align 16
Adam Langleyd9e397b2015-01-22 14:27:53 -08001923.Lsqr4x_sub:
David Benjamin4969cc92016-04-22 15:02:23 -04001924 mov 8*0($nptr),%r12
1925 mov 8*1($nptr),%r13
1926 mov 8*2($nptr),%r14
1927 mov 8*3($nptr),%r15
1928.Lsqr4x_sub_entry:
1929 lea 8*4($nptr),$nptr
1930 not %r12
1931 not %r13
1932 not %r14
1933 not %r15
1934 and %rax,%r12
1935 and %rax,%r13
1936 and %rax,%r14
1937 and %rax,%r15
1938
1939 neg %r10 # mov %r10,%cf
1940 adc 8*0($tptr),%r12
1941 adc 8*1($tptr),%r13
1942 adc 8*2($tptr),%r14
1943 adc 8*3($tptr),%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08001944 mov %r12,8*0($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04001945 lea 8*4($tptr),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08001946 mov %r13,8*1($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04001947 sbb %r10,%r10 # mov %cf,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08001948 mov %r14,8*2($rptr)
1949 mov %r15,8*3($rptr)
1950 lea 8*4($rptr),$rptr
1951
1952 inc %rcx # pass %cf
1953 jnz .Lsqr4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04001954
Adam Langleyd9e397b2015-01-22 14:27:53 -08001955 mov $num,%r10 # prepare for back-to-back call
1956 neg $num # restore $num
1957 ret
David Benjamin4969cc92016-04-22 15:02:23 -04001958.size __bn_post4x_internal,.-__bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001959___
David Benjamin4969cc92016-04-22 15:02:23 -04001960}
Adam Langleyd9e397b2015-01-22 14:27:53 -08001961{
1962$code.=<<___;
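# bn_from_montgomery(rp,ap,not_used,np,n0,num) converts ap[] out of
# Montgomery form, i.e. computes ap*R^-1 mod np with R=2^(64*num).
# Only num divisible by 8 is handled here (by tail-calling
# bn_from_mont8x); otherwise 0 is returned so that the caller can fall
# back to a generic code path.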
1963.globl bn_from_montgomery
1964.type bn_from_montgomery,\@abi-omnipotent
1965.align 32
1966bn_from_montgomery:
1967 testl \$7,`($win64?"48(%rsp)":"%r9d")`
1968 jz bn_from_mont8x
1969 xor %eax,%eax
1970 ret
1971.size bn_from_montgomery,.-bn_from_montgomery
1972
1973.type bn_from_mont8x,\@function,6
1974.align 32
1975bn_from_mont8x:
1976 .byte 0x67
1977 mov %rsp,%rax
1978 push %rbx
1979 push %rbp
1980 push %r12
1981 push %r13
1982 push %r14
1983 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04001984
Adam Langleyd9e397b2015-01-22 14:27:53 -08001985 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04001986 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08001987 neg $num
1988 mov ($n0),$n0 # *n0
1989
1990 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04001991 # Ensure that stack frame doesn't alias with $rptr+3*$num
1992 # modulo 4096, which covers ret[num], am[num] and n[num]
1993	# (see bn_exp.c). The stack is allocated so that it aligns with
1994	# bn_power5's frame, and since bn_from_montgomery happens to be the
1995	# last operation, we use the opportunity to cleanse it.
Adam Langleyd9e397b2015-01-22 14:27:53 -08001996 #
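	# %r11 below is (candidate %rsp - rp) reduced mod 4096; by comparing it
	# with 3*num bytes and adjusting the frame accordingly, the code makes
	# sure t[] does not partially overlap ret[], am[] or n[] at 4K-aliased
	# addresses, which would defeat the memory-disambiguation logic.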
David Benjamin4969cc92016-04-22 15:02:23 -04001997 lea -320(%rsp,$num,2),%r11
1998 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08001999 and \$4095,%r11
2000 cmp %r11,%r10
2001 jb .Lfrom_sp_alt
2002 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04002003 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002004 jmp .Lfrom_sp_done
2005
2006.align 32
2007.Lfrom_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002008 lea 4096-320(,$num,2),%r10
2009 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002010 sub %r10,%r11
2011 mov \$0,%r10
2012 cmovc %r10,%r11
2013 sub %r11,%rsp
2014.Lfrom_sp_done:
2015 and \$-64,%rsp
2016 mov $num,%r10
2017 neg $num
2018
2019 ##############################################################
2020 # Stack layout
2021 #
2022 # +0 saved $num, used in reduction section
2023 # +8 &t[2*$num], used in reduction section
2024 # +32 saved *n0
2025 # +40 saved %rsp
2026 # +48 t[2*$num]
2027 #
2028 mov $n0, 32(%rsp)
2029 mov %rax, 40(%rsp) # save original %rsp
2030.Lfrom_body:
2031 mov $num,%r11
2032 lea 48(%rsp),%rax
2033 pxor %xmm0,%xmm0
2034 jmp .Lmul_by_1
2035
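	# The loop below copies the num words of a[] into the lower half of
	# t[] and zeroes the upper half, i.e. it forms the product a*1; the
	# single reduction pass that follows then yields a*R^-1 mod n, which
	# is exactly the conversion out of Montgomery form.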
2036.align 32
2037.Lmul_by_1:
2038 movdqu ($aptr),%xmm1
2039 movdqu 16($aptr),%xmm2
2040 movdqu 32($aptr),%xmm3
2041 movdqa %xmm0,(%rax,$num)
2042 movdqu 48($aptr),%xmm4
2043 movdqa %xmm0,16(%rax,$num)
2044 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
2045 movdqa %xmm1,(%rax)
2046 movdqa %xmm0,32(%rax,$num)
2047 movdqa %xmm2,16(%rax)
2048 movdqa %xmm0,48(%rax,$num)
2049 movdqa %xmm3,32(%rax)
2050 movdqa %xmm4,48(%rax)
2051 lea 64(%rax),%rax
2052 sub \$64,%r11
2053 jnz .Lmul_by_1
2054
2055 movq $rptr,%xmm1
2056 movq $nptr,%xmm2
2057 .byte 0x67
2058 mov $nptr,%rbp
2059 movq %r10, %xmm3 # -num
2060___
2061$code.=<<___ if ($addx);
2062 mov OPENSSL_ia32cap_P+8(%rip),%r11d
David Benjamin4969cc92016-04-22 15:02:23 -04002063 and \$0x80108,%r11d
2064 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -08002065 jne .Lfrom_mont_nox
2066
2067 lea (%rax,$num),$rptr
David Benjamin4969cc92016-04-22 15:02:23 -04002068 call __bn_sqrx8x_reduction
2069 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002070
2071 pxor %xmm0,%xmm0
2072 lea 48(%rsp),%rax
2073 mov 40(%rsp),%rsi # restore %rsp
2074 jmp .Lfrom_mont_zero
2075
2076.align 32
2077.Lfrom_mont_nox:
2078___
2079$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002080 call __bn_sqr8x_reduction
2081 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002082
2083 pxor %xmm0,%xmm0
2084 lea 48(%rsp),%rax
2085 mov 40(%rsp),%rsi # restore %rsp
2086 jmp .Lfrom_mont_zero
2087
2088.align 32
2089.Lfrom_mont_zero:
2090 movdqa %xmm0,16*0(%rax)
2091 movdqa %xmm0,16*1(%rax)
2092 movdqa %xmm0,16*2(%rax)
2093 movdqa %xmm0,16*3(%rax)
2094 lea 16*4(%rax),%rax
2095 sub \$32,$num
2096 jnz .Lfrom_mont_zero
2097
2098 mov \$1,%rax
2099 mov -48(%rsi),%r15
2100 mov -40(%rsi),%r14
2101 mov -32(%rsi),%r13
2102 mov -24(%rsi),%r12
2103 mov -16(%rsi),%rbp
2104 mov -8(%rsi),%rbx
2105 lea (%rsi),%rsp
2106.Lfrom_epilogue:
2107 ret
2108.size bn_from_mont8x,.-bn_from_mont8x
2109___
2110}
2111}}}
2112
2113if ($addx) {{{
2114my $bp="%rdx"; # restore original value
2115
2116$code.=<<___;
2117.type bn_mulx4x_mont_gather5,\@function,6
2118.align 32
2119bn_mulx4x_mont_gather5:
2120.Lmulx4x_enter:
Adam Langleyd9e397b2015-01-22 14:27:53 -08002121 mov %rsp,%rax
2122 push %rbx
2123 push %rbp
2124 push %r12
2125 push %r13
2126 push %r14
2127 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04002128
Adam Langleyd9e397b2015-01-22 14:27:53 -08002129 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002130 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002131 neg $num # -$num
2132 mov ($n0),$n0 # *n0
2133
2134 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002135 # Ensure that stack frame doesn't alias with $rptr+3*$num
2136 # modulo 4096, which covers ret[num], am[num] and n[num]
2137	# (see bn_exp.c). This is done to allow the memory disambiguation
2138	# logic to do its magic. [An extra [num] is allocated in order
2139	# to align with bn_power5's frame, which is cleansed after
2140	# completing exponentiation. An extra 256 bytes is for the power mask
2141	# calculated from the 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002142 #
David Benjamin4969cc92016-04-22 15:02:23 -04002143 lea -320(%rsp,$num,2),%r11
2144 sub $rp,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002145 and \$4095,%r11
2146 cmp %r11,%r10
2147 jb .Lmulx4xsp_alt
2148 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04002149 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002150 jmp .Lmulx4xsp_done
2151
Adam Langleyd9e397b2015-01-22 14:27:53 -08002152.Lmulx4xsp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002153 lea 4096-320(,$num,2),%r10
2154 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002155 sub %r10,%r11
2156 mov \$0,%r10
2157 cmovc %r10,%r11
2158 sub %r11,%rsp
2159.Lmulx4xsp_done:
2160 and \$-64,%rsp # ensure alignment
2161 ##############################################################
2162 # Stack layout
2163 # +0 -num
2164 # +8 off-loaded &b[i]
2165 # +16 end of b[num]
2166 # +24 inner counter
2167 # +32 saved n0
2168 # +40 saved %rsp
2169 # +48
2170 # +56 saved rp
2171 # +64 tmp[num+1]
2172 #
2173 mov $n0, 32(%rsp) # save *n0
2174 mov %rax,40(%rsp) # save original %rsp
2175.Lmulx4x_body:
2176 call mulx4x_internal
2177
2178 mov 40(%rsp),%rsi # restore %rsp
2179 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04002180
Adam Langleyd9e397b2015-01-22 14:27:53 -08002181 mov -48(%rsi),%r15
2182 mov -40(%rsi),%r14
2183 mov -32(%rsi),%r13
2184 mov -24(%rsi),%r12
2185 mov -16(%rsi),%rbp
2186 mov -8(%rsi),%rbx
2187 lea (%rsi),%rsp
2188.Lmulx4x_epilogue:
2189 ret
2190.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2191
2192.type mulx4x_internal,\@abi-omnipotent
2193.align 32
2194mulx4x_internal:
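	# The whole Montgomery multiplication below is built on MULX (flag-less
	# multiply) plus ADCX/ADOX, which maintain two independent carry chains
	# in CF and OF; that is what lets the multiplication and the reduction
	# additions be interleaved without ever saving or restoring carries.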
David Benjamin4969cc92016-04-22 15:02:23 -04002195 mov $num,8(%rsp) # save -$num (it was in bytes)
2196 mov $num,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002197 neg $num # restore $num
2198 shl \$5,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002199 neg %r10 # restore $num
2200 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002201 shr \$5+5,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002202 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
Adam Langleyd9e397b2015-01-22 14:27:53 -08002203 sub \$1,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002204 lea .Linc(%rip),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002205 mov %r13,16+8(%rsp) # end of b[num]
2206 mov $num,24+8(%rsp) # inner counter
2207 mov $rp, 56+8(%rsp) # save $rp
2208___
2209my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2210 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2211my $rptr=$bptr;
2212my $STRIDE=2**5*8; # 5 is "window size"
2213my $N=$STRIDE/4; # should match cache line size
2214$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002215 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2216 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2217	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
2218 lea 128($bp),$bptr # size optimization
Adam Langleyd9e397b2015-01-22 14:27:53 -08002219
David Benjamin4969cc92016-04-22 15:02:23 -04002220 pshufd \$0,%xmm5,%xmm5 # broadcast index
2221 movdqa %xmm1,%xmm4
2222 .byte 0x67
2223 movdqa %xmm1,%xmm2
2224___
2225########################################################################
2226# calculate mask by comparing 0..31 to index and save result to stack
2227#
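# In scalar terms the constant-time selection below is (reference sketch
# only, the generated code uses 16-byte SSE2 lanes):
#
#	for (k = 0; k < 32; k++) mask[k] = (k == idx) ? ~0 : 0;
#	for (k = 0; k < 32; k++) word  |= table[k] & mask[k];
#
# Every table slot is read no matter what idx is, so the memory access
# pattern reveals nothing about which power of a' is selected.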
2228$code.=<<___;
2229 .byte 0x67
2230 paddd %xmm0,%xmm1
2231 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2232 movdqa %xmm4,%xmm3
2233___
2234for($i=0;$i<$STRIDE/16-4;$i+=4) {
2235$code.=<<___;
2236 paddd %xmm1,%xmm2
2237 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2238 movdqa %xmm0,`16*($i+0)+112`(%r10)
2239 movdqa %xmm4,%xmm0
2240
2241 paddd %xmm2,%xmm3
2242 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2243 movdqa %xmm1,`16*($i+1)+112`(%r10)
2244 movdqa %xmm4,%xmm1
2245
2246 paddd %xmm3,%xmm0
2247 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2248 movdqa %xmm2,`16*($i+2)+112`(%r10)
2249 movdqa %xmm4,%xmm2
2250
2251 paddd %xmm0,%xmm1
2252 pcmpeqd %xmm5,%xmm0
2253 movdqa %xmm3,`16*($i+3)+112`(%r10)
2254 movdqa %xmm4,%xmm3
2255___
2256}
2257$code.=<<___; # last iteration can be optimized
2258 .byte 0x67
2259 paddd %xmm1,%xmm2
2260 pcmpeqd %xmm5,%xmm1
2261 movdqa %xmm0,`16*($i+0)+112`(%r10)
2262
2263 paddd %xmm2,%xmm3
2264 pcmpeqd %xmm5,%xmm2
2265 movdqa %xmm1,`16*($i+1)+112`(%r10)
2266
2267 pcmpeqd %xmm5,%xmm3
2268 movdqa %xmm2,`16*($i+2)+112`(%r10)
2269
2270 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2271 pand `16*($i+1)-128`($bptr),%xmm1
2272 pand `16*($i+2)-128`($bptr),%xmm2
2273 movdqa %xmm3,`16*($i+3)+112`(%r10)
2274 pand `16*($i+3)-128`($bptr),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -08002275 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -04002276 por %xmm3,%xmm1
2277___
2278for($i=0;$i<$STRIDE/16-4;$i+=4) {
2279$code.=<<___;
2280 movdqa `16*($i+0)-128`($bptr),%xmm4
2281 movdqa `16*($i+1)-128`($bptr),%xmm5
2282 movdqa `16*($i+2)-128`($bptr),%xmm2
2283 pand `16*($i+0)+112`(%r10),%xmm4
2284 movdqa `16*($i+3)-128`($bptr),%xmm3
2285 pand `16*($i+1)+112`(%r10),%xmm5
2286 por %xmm4,%xmm0
2287 pand `16*($i+2)+112`(%r10),%xmm2
2288 por %xmm5,%xmm1
2289 pand `16*($i+3)+112`(%r10),%xmm3
2290 por %xmm2,%xmm0
2291 por %xmm3,%xmm1
2292___
2293}
2294$code.=<<___;
2295 pxor %xmm1,%xmm0
2296 pshufd \$0x4e,%xmm0,%xmm1
2297 por %xmm1,%xmm0
2298 lea $STRIDE($bptr),$bptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002299 movq %xmm0,%rdx # bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -04002300 lea 64+8*4+8(%rsp),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002301
2302 mov %rdx,$bi
2303 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2304 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2305 add %rax,%r11
2306 mulx 2*8($aptr),%rax,%r13 # ...
2307 adc %rax,%r12
2308 adc \$0,%r13
2309 mulx 3*8($aptr),%rax,%r14
2310
2311 mov $mi,%r15
2312 imulq 32+8(%rsp),$mi # "t[0]"*n0
2313 xor $zero,$zero # cf=0, of=0
2314 mov $mi,%rdx
2315
Adam Langleyd9e397b2015-01-22 14:27:53 -08002316 mov $bptr,8+8(%rsp) # off-load &b[i]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002317
David Benjamin4969cc92016-04-22 15:02:23 -04002318 lea 4*8($aptr),$aptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002319 adcx %rax,%r13
2320 adcx $zero,%r14 # cf=0
2321
David Benjamin4969cc92016-04-22 15:02:23 -04002322 mulx 0*8($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002323 adcx %rax,%r15 # discarded
2324 adox %r11,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002325 mulx 1*8($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002326 adcx %rax,%r10
2327 adox %r12,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002328 mulx 2*8($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002329 mov 24+8(%rsp),$bptr # counter value
Adam Langleyd9e397b2015-01-22 14:27:53 -08002330 mov %r10,-8*4($tptr)
2331 adcx %rax,%r11
2332 adox %r13,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002333 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002334 mov $bi,%rdx
2335 mov %r11,-8*3($tptr)
2336 adcx %rax,%r12
2337 adox $zero,%r15 # of=0
David Benjamin4969cc92016-04-22 15:02:23 -04002338 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002339 mov %r12,-8*2($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002340 jmp .Lmulx4x_1st
Adam Langleyd9e397b2015-01-22 14:27:53 -08002341
2342.align 32
2343.Lmulx4x_1st:
2344 adcx $zero,%r15 # cf=0, modulo-scheduled
2345 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2346 adcx %r14,%r10
2347 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2348 adcx %rax,%r11
2349 mulx 2*8($aptr),%r12,%rax # ...
2350 adcx %r14,%r12
2351 mulx 3*8($aptr),%r13,%r14
2352 .byte 0x67,0x67
2353 mov $mi,%rdx
2354 adcx %rax,%r13
2355 adcx $zero,%r14 # cf=0
2356 lea 4*8($aptr),$aptr
2357 lea 4*8($tptr),$tptr
2358
2359 adox %r15,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002360 mulx 0*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002361 adcx %rax,%r10
2362 adox %r15,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002363 mulx 1*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002364 adcx %rax,%r11
2365 adox %r15,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002366 mulx 2*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002367 mov %r10,-5*8($tptr)
2368 adcx %rax,%r12
2369 mov %r11,-4*8($tptr)
2370 adox %r15,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04002371 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002372 mov $bi,%rdx
2373 mov %r12,-3*8($tptr)
2374 adcx %rax,%r13
2375 adox $zero,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04002376 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002377 mov %r13,-2*8($tptr)
2378
2379 dec $bptr # of=0, pass cf
2380 jnz .Lmulx4x_1st
2381
2382 mov 8(%rsp),$num # load -num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002383 adc $zero,%r15 # modulo-scheduled
2384 lea ($aptr,$num),$aptr # rewind $aptr
2385 add %r15,%r14
2386 mov 8+8(%rsp),$bptr # re-load &b[i]
2387 adc $zero,$zero # top-most carry
2388 mov %r14,-1*8($tptr)
2389 jmp .Lmulx4x_outer
2390
2391.align 32
2392.Lmulx4x_outer:
David Benjamin4969cc92016-04-22 15:02:23 -04002393 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2394 pxor %xmm4,%xmm4
2395 .byte 0x67,0x67
2396 pxor %xmm5,%xmm5
2397___
2398for($i=0;$i<$STRIDE/16;$i+=4) {
2399$code.=<<___;
2400 movdqa `16*($i+0)-128`($bptr),%xmm0
2401 movdqa `16*($i+1)-128`($bptr),%xmm1
2402 movdqa `16*($i+2)-128`($bptr),%xmm2
2403 pand `16*($i+0)+256`(%r10),%xmm0
2404 movdqa `16*($i+3)-128`($bptr),%xmm3
2405 pand `16*($i+1)+256`(%r10),%xmm1
2406 por %xmm0,%xmm4
2407 pand `16*($i+2)+256`(%r10),%xmm2
2408 por %xmm1,%xmm5
2409 pand `16*($i+3)+256`(%r10),%xmm3
2410 por %xmm2,%xmm4
2411 por %xmm3,%xmm5
2412___
2413}
2414$code.=<<___;
2415 por %xmm5,%xmm4
2416 pshufd \$0x4e,%xmm4,%xmm0
2417 por %xmm4,%xmm0
2418 lea $STRIDE($bptr),$bptr
2419 movq %xmm0,%rdx # m0=bp[i]
2420
Adam Langleyd9e397b2015-01-22 14:27:53 -08002421 mov $zero,($tptr) # save top-most carry
2422 lea 4*8($tptr,$num),$tptr # rewind $tptr
2423 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2424 xor $zero,$zero # cf=0, of=0
2425 mov %rdx,$bi
2426 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2427 adox -4*8($tptr),$mi # +t[0]
2428 adcx %r14,%r11
2429 mulx 2*8($aptr),%r15,%r13 # ...
2430 adox -3*8($tptr),%r11
2431 adcx %r15,%r12
2432 mulx 3*8($aptr),%rdx,%r14
2433 adox -2*8($tptr),%r12
2434 adcx %rdx,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04002435 lea ($nptr,$num),$nptr # rewind $nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002436 lea 4*8($aptr),$aptr
2437 adox -1*8($tptr),%r13
2438 adcx $zero,%r14
2439 adox $zero,%r14
2440
Adam Langleyd9e397b2015-01-22 14:27:53 -08002441 mov $mi,%r15
2442 imulq 32+8(%rsp),$mi # "t[0]"*n0
2443
Adam Langleyd9e397b2015-01-22 14:27:53 -08002444 mov $mi,%rdx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002445 xor $zero,$zero # cf=0, of=0
2446 mov $bptr,8+8(%rsp) # off-load &b[i]
2447
David Benjamin4969cc92016-04-22 15:02:23 -04002448 mulx 0*8($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002449 adcx %rax,%r15 # discarded
2450 adox %r11,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002451 mulx 1*8($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002452 adcx %rax,%r10
2453 adox %r12,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002454 mulx 2*8($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002455 adcx %rax,%r11
2456 adox %r13,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002457 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002458 mov $bi,%rdx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002459 mov 24+8(%rsp),$bptr # counter value
2460 mov %r10,-8*4($tptr)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002461 adcx %rax,%r12
2462 mov %r11,-8*3($tptr)
2463 adox $zero,%r15 # of=0
2464 mov %r12,-8*2($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002465 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002466 jmp .Lmulx4x_inner
2467
2468.align 32
2469.Lmulx4x_inner:
2470 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2471 adcx $zero,%r15 # cf=0, modulo-scheduled
2472 adox %r14,%r10
2473 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2474 adcx 0*8($tptr),%r10
2475 adox %rax,%r11
2476 mulx 2*8($aptr),%r12,%rax # ...
2477 adcx 1*8($tptr),%r11
2478 adox %r14,%r12
2479 mulx 3*8($aptr),%r13,%r14
2480 mov $mi,%rdx
2481 adcx 2*8($tptr),%r12
2482 adox %rax,%r13
2483 adcx 3*8($tptr),%r13
2484 adox $zero,%r14 # of=0
2485 lea 4*8($aptr),$aptr
2486 lea 4*8($tptr),$tptr
2487 adcx $zero,%r14 # cf=0
2488
2489 adox %r15,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002490 mulx 0*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002491 adcx %rax,%r10
2492 adox %r15,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002493 mulx 1*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002494 adcx %rax,%r11
2495 adox %r15,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002496 mulx 2*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002497 mov %r10,-5*8($tptr)
2498 adcx %rax,%r12
2499 adox %r15,%r13
2500 mov %r11,-4*8($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002501 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002502 mov $bi,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -04002503 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002504 mov %r12,-3*8($tptr)
2505 adcx %rax,%r13
2506 adox $zero,%r15
2507 mov %r13,-2*8($tptr)
2508
2509 dec $bptr # of=0, pass cf
2510 jnz .Lmulx4x_inner
2511
2512 mov 0+8(%rsp),$num # load -num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002513 adc $zero,%r15 # modulo-scheduled
2514 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2515 mov 8+8(%rsp),$bptr # re-load &b[i]
2516 mov 16+8(%rsp),%r10
2517 adc %r15,%r14
2518 lea ($aptr,$num),$aptr # rewind $aptr
2519 adc $zero,$zero # top-most carry
2520 mov %r14,-1*8($tptr)
2521
2522 cmp %r10,$bptr
2523 jb .Lmulx4x_outer
2524
David Benjamin4969cc92016-04-22 15:02:23 -04002525 mov -8($nptr),%r10
2526 mov $zero,%r8
2527 mov ($nptr,$num),%r12
2528 lea ($nptr,$num),%rbp # rewind $nptr
2529 mov $num,%rcx
2530 lea ($tptr,$num),%rdi # rewind $tptr
2531 xor %eax,%eax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002532 xor %r15,%r15
2533 sub %r14,%r10 # compare top-most words
2534 adc %r15,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04002535 or %r15,%r8
2536 sar \$3+2,%rcx
2537 sub %r8,%rax # %rax=-%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002538 mov 56+8(%rsp),%rdx # restore rp
David Benjamin4969cc92016-04-22 15:02:23 -04002539 dec %r12 # so that after 'not' we get -n[0]
2540 mov 8*1(%rbp),%r13
2541 xor %r8,%r8
2542 mov 8*2(%rbp),%r14
2543 mov 8*3(%rbp),%r15
2544 jmp .Lsqrx4x_sub_entry # common post-condition
Adam Langleyd9e397b2015-01-22 14:27:53 -08002545.size mulx4x_internal,.-mulx4x_internal
2546___
2547} {
2548######################################################################
2549# void bn_power5(
2550my $rptr="%rdi"; # BN_ULONG *rptr,
2551my $aptr="%rsi"; # const BN_ULONG *aptr,
2552my $bptr="%rdx"; # const void *table,
2553my $nptr="%rcx"; # const BN_ULONG *nptr,
2554my $n0 ="%r8"; # const BN_ULONG *n0);
2555my $num ="%r9"; # int num, has to be divisible by 8
2556 # int pwr);
2557
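# bn_powerx5 performs one 5-bit window step of the constant-time
# exponentiation: five back-to-back Montgomery squarings followed by a
# Montgomery multiplication with the power of a' gathered from the table;
# see the five __bn_sqrx8x_internal/__bn_postx4x_internal pairs and the
# mulx4x_internal call below.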
2558my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2559my @A0=("%r10","%r11");
2560my @A1=("%r12","%r13");
2561my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2562
2563$code.=<<___;
2564.type bn_powerx5,\@function,6
2565.align 32
2566bn_powerx5:
2567.Lpowerx5_enter:
Adam Langleyd9e397b2015-01-22 14:27:53 -08002568 mov %rsp,%rax
2569 push %rbx
2570 push %rbp
2571 push %r12
2572 push %r13
2573 push %r14
2574 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04002575
Adam Langleyd9e397b2015-01-22 14:27:53 -08002576 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002577 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002578 neg $num
2579 mov ($n0),$n0 # *n0
2580
2581 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002582 # Ensure that stack frame doesn't alias with $rptr+3*$num
2583 # modulo 4096, which covers ret[num], am[num] and n[num]
2584	# (see bn_exp.c). This is done to allow the memory disambiguation
2585	# logic to do its magic. [An extra 256 bytes is for the power mask
2586	# calculated from the 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002587 #
David Benjamin4969cc92016-04-22 15:02:23 -04002588 lea -320(%rsp,$num,2),%r11
2589 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002590 and \$4095,%r11
2591 cmp %r11,%r10
2592 jb .Lpwrx_sp_alt
2593 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04002594 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002595 jmp .Lpwrx_sp_done
2596
2597.align 32
2598.Lpwrx_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002599 lea 4096-320(,$num,2),%r10
2600 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002601 sub %r10,%r11
2602 mov \$0,%r10
2603 cmovc %r10,%r11
2604 sub %r11,%rsp
2605.Lpwrx_sp_done:
2606 and \$-64,%rsp
2607 mov $num,%r10
2608 neg $num
2609
2610 ##############################################################
2611 # Stack layout
2612 #
2613 # +0 saved $num, used in reduction section
2614 # +8 &t[2*$num], used in reduction section
2615 # +16 intermediate carry bit
2616 # +24 top-most carry bit, used in reduction section
2617 # +32 saved *n0
2618 # +40 saved %rsp
2619 # +48 t[2*$num]
2620 #
2621 pxor %xmm0,%xmm0
2622 movq $rptr,%xmm1 # save $rptr
2623 movq $nptr,%xmm2 # save $nptr
2624 movq %r10, %xmm3 # -$num
2625 movq $bptr,%xmm4
2626 mov $n0, 32(%rsp)
2627 mov %rax, 40(%rsp) # save original %rsp
2628.Lpowerx5_body:
2629
2630 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002631 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002632 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002633 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002634 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002635 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002636 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002637 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002638 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002639 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002640
2641 mov %r10,$num # -num
2642 mov $aptr,$rptr
2643 movq %xmm2,$nptr
2644 movq %xmm4,$bptr
2645 mov 40(%rsp),%rax
2646
2647 call mulx4x_internal
2648
2649 mov 40(%rsp),%rsi # restore %rsp
2650 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04002651
Adam Langleyd9e397b2015-01-22 14:27:53 -08002652 mov -48(%rsi),%r15
2653 mov -40(%rsi),%r14
2654 mov -32(%rsi),%r13
2655 mov -24(%rsi),%r12
2656 mov -16(%rsi),%rbp
2657 mov -8(%rsi),%rbx
2658 lea (%rsi),%rsp
2659.Lpowerx5_epilogue:
2660 ret
2661.size bn_powerx5,.-bn_powerx5
2662
2663.globl bn_sqrx8x_internal
2664.hidden bn_sqrx8x_internal
2665.type bn_sqrx8x_internal,\@abi-omnipotent
2666.align 32
2667bn_sqrx8x_internal:
2668__bn_sqrx8x_internal:
2669 ##################################################################
2670 # Squaring part:
2671 #
2672 # a) multiply-n-add everything but a[i]*a[i];
2673 # b) shift result of a) by 1 to the left and accumulate
2674 # a[i]*a[i] products;
2675 #
2676 ##################################################################
2677 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2678 # a[1]a[0]
2679 # a[2]a[0]
2680 # a[3]a[0]
2681 # a[2]a[1]
2682 # a[3]a[1]
2683 # a[3]a[2]
2684 #
2685 # a[4]a[0]
2686 # a[5]a[0]
2687 # a[6]a[0]
2688 # a[7]a[0]
2689 # a[4]a[1]
2690 # a[5]a[1]
2691 # a[6]a[1]
2692 # a[7]a[1]
2693 # a[4]a[2]
2694 # a[5]a[2]
2695 # a[6]a[2]
2696 # a[7]a[2]
2697 # a[4]a[3]
2698 # a[5]a[3]
2699 # a[6]a[3]
2700 # a[7]a[3]
2701 #
2702 # a[5]a[4]
2703 # a[6]a[4]
2704 # a[7]a[4]
2705 # a[6]a[5]
2706 # a[7]a[5]
2707 # a[7]a[6]
2708 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
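	# In other words the square is assembled as
	#	a^2 = 2*(sum of a[i]*a[j], i<j) + sum of a[i]*a[i],
	# the cross products above being computed once and then doubled by the
	# shift-n-add pass further down, which also folds in the a[i]*a[i]
	# diagonal terms.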
2709___
2710{
2711my ($zero,$carry)=("%rbp","%rcx");
2712my $aaptr=$zero;
2713$code.=<<___;
2714 lea 48+8(%rsp),$tptr
2715 lea ($aptr,$num),$aaptr
2716 mov $num,0+8(%rsp) # save $num
2717 mov $aaptr,8+8(%rsp) # save end of $aptr
2718 jmp .Lsqr8x_zero_start
2719
2720.align 32
2721.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2722.Lsqrx8x_zero:
2723 .byte 0x3e
2724 movdqa %xmm0,0*8($tptr)
2725 movdqa %xmm0,2*8($tptr)
2726 movdqa %xmm0,4*8($tptr)
2727 movdqa %xmm0,6*8($tptr)
2728.Lsqr8x_zero_start: # aligned at 32
2729 movdqa %xmm0,8*8($tptr)
2730 movdqa %xmm0,10*8($tptr)
2731 movdqa %xmm0,12*8($tptr)
2732 movdqa %xmm0,14*8($tptr)
2733 lea 16*8($tptr),$tptr
2734 sub \$64,$num
2735 jnz .Lsqrx8x_zero
2736
2737 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2738 #xor %r9,%r9 # t[1], ex-$num, zero already
2739 xor %r10,%r10
2740 xor %r11,%r11
2741 xor %r12,%r12
2742 xor %r13,%r13
2743 xor %r14,%r14
2744 xor %r15,%r15
2745 lea 48+8(%rsp),$tptr
2746	xor	$zero,$zero		# cf=0, of=0
2747 jmp .Lsqrx8x_outer_loop
2748
2749.align 32
2750.Lsqrx8x_outer_loop:
2751 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2752 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2753 adox %rax,%r10
2754 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2755 adcx %r10,%r9
2756 adox %rax,%r11
2757 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2758 adcx %r11,%r10
2759 adox %rax,%r12
2760 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2761 adcx %r12,%r11
2762 adox %rax,%r13
2763 mulx 5*8($aptr),%r12,%rax
2764 adcx %r13,%r12
2765 adox %rax,%r14
2766 mulx 6*8($aptr),%r13,%rax
2767 adcx %r14,%r13
2768 adox %r15,%rax
2769 mulx 7*8($aptr),%r14,%r15
2770 mov 1*8($aptr),%rdx # a[1]
2771 adcx %rax,%r14
2772 adox $zero,%r15
2773 adc 8*8($tptr),%r15
2774 mov %r8,1*8($tptr) # t[1]
2775 mov %r9,2*8($tptr) # t[2]
2776 sbb $carry,$carry # mov %cf,$carry
2777 xor $zero,$zero # cf=0, of=0
2778
2779
2780 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2781 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2782 adcx %r10,%r8
2783 adox %rbx,%r9
2784 mulx 4*8($aptr),%r10,%rbx # ...
2785 adcx %r11,%r9
2786 adox %rax,%r10
2787 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2788 adcx %r12,%r10
2789 adox %rbx,%r11
2790 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2791 adcx %r13,%r11
2792 adox %r14,%r12
2793 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2794 mov 2*8($aptr),%rdx # a[2]
2795 adcx %rax,%r12
2796 adox %rbx,%r13
2797 adcx %r15,%r13
2798 adox $zero,%r14 # of=0
2799 adcx $zero,%r14 # cf=0
2800
2801 mov %r8,3*8($tptr) # t[3]
2802 mov %r9,4*8($tptr) # t[4]
2803
2804 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2805 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2806 adcx %r10,%r8
2807 adox %rbx,%r9
2808 mulx 5*8($aptr),%r10,%rbx # ...
2809 adcx %r11,%r9
2810 adox %rax,%r10
2811 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2812 adcx %r12,%r10
2813 adox %r13,%r11
2814 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2815 .byte 0x3e
2816 mov 3*8($aptr),%rdx # a[3]
2817 adcx %rbx,%r11
2818 adox %rax,%r12
2819 adcx %r14,%r12
2820 mov %r8,5*8($tptr) # t[5]
2821 mov %r9,6*8($tptr) # t[6]
2822 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2823 adox $zero,%r13 # of=0
2824 adcx $zero,%r13 # cf=0
2825
2826 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2827 adcx %r10,%r8
2828 adox %rax,%r9
2829 mulx 6*8($aptr),%r10,%rax # ...
2830 adcx %r11,%r9
2831 adox %r12,%r10
2832 mulx 7*8($aptr),%r11,%r12
2833 mov 4*8($aptr),%rdx # a[4]
2834 mov 5*8($aptr),%r14 # a[5]
2835 adcx %rbx,%r10
2836 adox %rax,%r11
2837 mov 6*8($aptr),%r15 # a[6]
2838 adcx %r13,%r11
2839 adox $zero,%r12 # of=0
2840 adcx $zero,%r12 # cf=0
2841
2842 mov %r8,7*8($tptr) # t[7]
2843 mov %r9,8*8($tptr) # t[8]
2844
2845 mulx %r14,%r9,%rax # a[5]*a[4]
2846 mov 7*8($aptr),%r8 # a[7]
2847 adcx %r10,%r9
2848 mulx %r15,%r10,%rbx # a[6]*a[4]
2849 adox %rax,%r10
2850 adcx %r11,%r10
2851 mulx %r8,%r11,%rax # a[7]*a[4]
2852 mov %r14,%rdx # a[5]
2853 adox %rbx,%r11
2854 adcx %r12,%r11
2855 #adox $zero,%rax # of=0
2856 adcx $zero,%rax # cf=0
2857
2858 mulx %r15,%r14,%rbx # a[6]*a[5]
2859 mulx %r8,%r12,%r13 # a[7]*a[5]
2860 mov %r15,%rdx # a[6]
2861 lea 8*8($aptr),$aptr
2862 adcx %r14,%r11
2863 adox %rbx,%r12
2864 adcx %rax,%r12
2865 adox $zero,%r13
2866
2867 .byte 0x67,0x67
2868 mulx %r8,%r8,%r14 # a[7]*a[6]
2869 adcx %r8,%r13
2870 adcx $zero,%r14
2871
2872 cmp 8+8(%rsp),$aptr
2873 je .Lsqrx8x_outer_break
2874
2875 neg $carry # mov $carry,%cf
2876 mov \$-8,%rcx
2877 mov $zero,%r15
2878 mov 8*8($tptr),%r8
2879 adcx 9*8($tptr),%r9 # +=t[9]
2880 adcx 10*8($tptr),%r10 # ...
2881 adcx 11*8($tptr),%r11
2882 adc 12*8($tptr),%r12
2883 adc 13*8($tptr),%r13
2884 adc 14*8($tptr),%r14
2885 adc 15*8($tptr),%r15
2886 lea ($aptr),$aaptr
2887 lea 2*64($tptr),$tptr
2888 sbb %rax,%rax # mov %cf,$carry
2889
2890 mov -64($aptr),%rdx # a[0]
2891 mov %rax,16+8(%rsp) # offload $carry
2892 mov $tptr,24+8(%rsp)
2893
2894 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2895 xor %eax,%eax # cf=0, of=0
2896 jmp .Lsqrx8x_loop
2897
2898.align 32
2899.Lsqrx8x_loop:
2900 mov %r8,%rbx
2901 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2902 adcx %rax,%rbx # +=t[8]
2903 adox %r9,%r8
2904
2905 mulx 1*8($aaptr),%rax,%r9 # ...
2906 adcx %rax,%r8
2907 adox %r10,%r9
2908
2909 mulx 2*8($aaptr),%rax,%r10
2910 adcx %rax,%r9
2911 adox %r11,%r10
2912
2913 mulx 3*8($aaptr),%rax,%r11
2914 adcx %rax,%r10
2915 adox %r12,%r11
2916
2917 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2918 adcx %rax,%r11
2919 adox %r13,%r12
2920
2921 mulx 5*8($aaptr),%rax,%r13
2922 adcx %rax,%r12
2923 adox %r14,%r13
2924
2925 mulx 6*8($aaptr),%rax,%r14
2926 mov %rbx,($tptr,%rcx,8) # store t[8+i]
2927 mov \$0,%ebx
2928 adcx %rax,%r13
2929 adox %r15,%r14
2930
2931 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
2932 mov 8($aptr,%rcx,8),%rdx # a[i]
2933 adcx %rax,%r14
2934 adox %rbx,%r15 # %rbx is 0, of=0
2935 adcx %rbx,%r15 # cf=0
2936
2937 .byte 0x67
2938 inc %rcx # of=0
2939 jnz .Lsqrx8x_loop
2940
2941 lea 8*8($aaptr),$aaptr
2942 mov \$-8,%rcx
2943 cmp 8+8(%rsp),$aaptr # done?
2944 je .Lsqrx8x_break
2945
2946 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
2947 .byte 0x66
2948 mov -64($aptr),%rdx
2949 adcx 0*8($tptr),%r8
2950 adcx 1*8($tptr),%r9
2951 adc 2*8($tptr),%r10
2952 adc 3*8($tptr),%r11
2953 adc 4*8($tptr),%r12
2954 adc 5*8($tptr),%r13
2955 adc 6*8($tptr),%r14
2956 adc 7*8($tptr),%r15
2957 lea 8*8($tptr),$tptr
2958 .byte 0x67
2959 sbb %rax,%rax # mov %cf,%rax
2960 xor %ebx,%ebx # cf=0, of=0
2961 mov %rax,16+8(%rsp) # offload carry
2962 jmp .Lsqrx8x_loop
2963
2964.align 32
2965.Lsqrx8x_break:
2966 sub 16+8(%rsp),%r8 # consume last carry
2967 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
2968 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
2969 xor %ebp,%ebp # xor $zero,$zero
2970 mov %r8,0*8($tptr)
2971 cmp $carry,$tptr # cf=0, of=0
2972 je .Lsqrx8x_outer_loop
2973
2974 mov %r9,1*8($tptr)
2975 mov 1*8($carry),%r9
2976 mov %r10,2*8($tptr)
2977 mov 2*8($carry),%r10
2978 mov %r11,3*8($tptr)
2979 mov 3*8($carry),%r11
2980 mov %r12,4*8($tptr)
2981 mov 4*8($carry),%r12
2982 mov %r13,5*8($tptr)
2983 mov 5*8($carry),%r13
2984 mov %r14,6*8($tptr)
2985 mov 6*8($carry),%r14
2986 mov %r15,7*8($tptr)
2987 mov 7*8($carry),%r15
2988 mov $carry,$tptr
2989 jmp .Lsqrx8x_outer_loop
2990
2991.align 32
2992.Lsqrx8x_outer_break:
2993 mov %r9,9*8($tptr) # t[9]
2994 movq %xmm3,%rcx # -$num
2995 mov %r10,10*8($tptr) # ...
2996 mov %r11,11*8($tptr)
2997 mov %r12,12*8($tptr)
2998 mov %r13,13*8($tptr)
2999 mov %r14,14*8($tptr)
3000___
3001} {
3002my $i="%rcx";
3003$code.=<<___;
3004 lea 48+8(%rsp),$tptr
3005 mov ($aptr,$i),%rdx # a[0]
3006
3007 mov 8($tptr),$A0[1] # t[1]
3008 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3009 mov 0+8(%rsp),$num # restore $num
3010 adox $A0[1],$A0[1]
3011 mov 16($tptr),$A1[0] # t[2] # prefetch
3012 mov 24($tptr),$A1[1] # t[3] # prefetch
3013 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3014
3015.align 32
3016.Lsqrx4x_shift_n_add:
3017 mulx %rdx,%rax,%rbx
3018 adox $A1[0],$A1[0]
3019 adcx $A0[0],%rax
3020 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3021 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3022 adox $A1[1],$A1[1]
3023 adcx $A0[1],%rbx
3024 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3025 mov %rax,0($tptr)
3026 mov %rbx,8($tptr)
3027
3028 mulx %rdx,%rax,%rbx
3029 adox $A0[0],$A0[0]
3030 adcx $A1[0],%rax
3031 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3032 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3033 adox $A0[1],$A0[1]
3034 adcx $A1[1],%rbx
3035 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3036 mov %rax,16($tptr)
3037 mov %rbx,24($tptr)
3038
3039 mulx %rdx,%rax,%rbx
3040 adox $A1[0],$A1[0]
3041 adcx $A0[0],%rax
3042 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3043 lea 32($i),$i
3044 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3045 adox $A1[1],$A1[1]
3046 adcx $A0[1],%rbx
3047 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3048 mov %rax,32($tptr)
3049 mov %rbx,40($tptr)
3050
3051 mulx %rdx,%rax,%rbx
3052 adox $A0[0],$A0[0]
3053 adcx $A1[0],%rax
3054 jrcxz .Lsqrx4x_shift_n_add_break
3055 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3056 adox $A0[1],$A0[1]
3057 adcx $A1[1],%rbx
3058 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3059 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3060 mov %rax,48($tptr)
3061 mov %rbx,56($tptr)
3062 lea 64($tptr),$tptr
3063 nop
3064 jmp .Lsqrx4x_shift_n_add
3065
3066.align 32
3067.Lsqrx4x_shift_n_add_break:
3068 adcx $A1[1],%rbx
3069 mov %rax,48($tptr)
3070 mov %rbx,56($tptr)
3071 lea 64($tptr),$tptr # end of t[] buffer
3072___
3073}
3074######################################################################
3075# Montgomery reduction part, "word-by-word" algorithm.
3076#
3077# This new path is inspired by multiple submissions from Intel, by
3078# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3079# Vinodh Gopal...
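# One reduction step per 64-bit word t[i]:
#
#	m  = t[i]*n0 mod 2^64		(n0 = -n^-1 mod 2^64)
#	t += m*n << (64*i)		(clears t[i], may carry upwards)
#
# After num such steps the low num words of t[] are zero and the result is
# t >> (64*num), followed by at most one conditional subtraction of n.
# The code below processes eight words per iteration to keep the
# MULX/ADCX/ADOX pipelines full.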
3080{
3081my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3082
3083$code.=<<___;
3084 movq %xmm2,$nptr
David Benjamin4969cc92016-04-22 15:02:23 -04003085__bn_sqrx8x_reduction:
Adam Langleyd9e397b2015-01-22 14:27:53 -08003086 xor %eax,%eax # initial top-most carry bit
3087 mov 32+8(%rsp),%rbx # n0
3088 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003089 lea -8*8($nptr,$num),%rcx # end of n[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003090 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3091 mov %rcx, 0+8(%rsp) # save end of n[]
3092 mov $tptr,8+8(%rsp) # save end of t[]
3093
3094 lea 48+8(%rsp),$tptr # initial t[] window
3095 jmp .Lsqrx8x_reduction_loop
3096
3097.align 32
3098.Lsqrx8x_reduction_loop:
3099 mov 8*1($tptr),%r9
3100 mov 8*2($tptr),%r10
3101 mov 8*3($tptr),%r11
3102 mov 8*4($tptr),%r12
3103 mov %rdx,%r8
3104 imulq %rbx,%rdx # n0*a[i]
3105 mov 8*5($tptr),%r13
3106 mov 8*6($tptr),%r14
3107 mov 8*7($tptr),%r15
3108 mov %rax,24+8(%rsp) # store top-most carry bit
3109
3110 lea 8*8($tptr),$tptr
3111 xor $carry,$carry # cf=0,of=0
3112 mov \$-8,%rcx
3113 jmp .Lsqrx8x_reduce
3114
3115.align 32
3116.Lsqrx8x_reduce:
3117 mov %r8, %rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003118 mulx 8*0($nptr),%rax,%r8 # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003119 adcx %rbx,%rax # discarded
3120 adox %r9,%r8
3121
David Benjamin4969cc92016-04-22 15:02:23 -04003122 mulx 8*1($nptr),%rbx,%r9 # n[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003123 adcx %rbx,%r8
3124 adox %r10,%r9
3125
David Benjamin4969cc92016-04-22 15:02:23 -04003126 mulx 8*2($nptr),%rbx,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08003127 adcx %rbx,%r9
3128 adox %r11,%r10
3129
David Benjamin4969cc92016-04-22 15:02:23 -04003130 mulx 8*3($nptr),%rbx,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08003131 adcx %rbx,%r10
3132 adox %r12,%r11
3133
David Benjamin4969cc92016-04-22 15:02:23 -04003134 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003135 mov %rdx,%rax
3136 mov %r8,%rdx
3137 adcx %rbx,%r11
3138 adox %r13,%r12
3139
3140 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3141 mov %rax,%rdx
3142 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3143
David Benjamin4969cc92016-04-22 15:02:23 -04003144 mulx 8*5($nptr),%rax,%r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08003145 adcx %rax,%r12
3146 adox %r14,%r13
3147
David Benjamin4969cc92016-04-22 15:02:23 -04003148 mulx 8*6($nptr),%rax,%r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08003149 adcx %rax,%r13
3150 adox %r15,%r14
3151
David Benjamin4969cc92016-04-22 15:02:23 -04003152 mulx 8*7($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003153 mov %rbx,%rdx
3154 adcx %rax,%r14
3155 adox $carry,%r15 # $carry is 0
3156 adcx $carry,%r15 # cf=0
3157
3158 .byte 0x67,0x67,0x67
3159 inc %rcx # of=0
3160 jnz .Lsqrx8x_reduce
3161
3162 mov $carry,%rax # xor %rax,%rax
3163 cmp 0+8(%rsp),$nptr # end of n[]?
3164 jae .Lsqrx8x_no_tail
3165
3166 mov 48+8(%rsp),%rdx # pull n0*a[0]
3167 add 8*0($tptr),%r8
David Benjamin4969cc92016-04-22 15:02:23 -04003168 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003169 mov \$-8,%rcx
3170 adcx 8*1($tptr),%r9
3171 adcx 8*2($tptr),%r10
3172 adc 8*3($tptr),%r11
3173 adc 8*4($tptr),%r12
3174 adc 8*5($tptr),%r13
3175 adc 8*6($tptr),%r14
3176 adc 8*7($tptr),%r15
3177 lea 8*8($tptr),$tptr
3178 sbb %rax,%rax # top carry
3179
3180 xor $carry,$carry # of=0, cf=0
3181 mov %rax,16+8(%rsp)
3182 jmp .Lsqrx8x_tail
3183
3184.align 32
3185.Lsqrx8x_tail:
3186 mov %r8,%rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003187 mulx 8*0($nptr),%rax,%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08003188 adcx %rax,%rbx
3189 adox %r9,%r8
3190
David Benjamin4969cc92016-04-22 15:02:23 -04003191 mulx 8*1($nptr),%rax,%r9
Adam Langleyd9e397b2015-01-22 14:27:53 -08003192 adcx %rax,%r8
3193 adox %r10,%r9
3194
David Benjamin4969cc92016-04-22 15:02:23 -04003195 mulx 8*2($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08003196 adcx %rax,%r9
3197 adox %r11,%r10
3198
David Benjamin4969cc92016-04-22 15:02:23 -04003199 mulx 8*3($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08003200 adcx %rax,%r10
3201 adox %r12,%r11
3202
David Benjamin4969cc92016-04-22 15:02:23 -04003203 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003204 adcx %rax,%r11
3205 adox %r13,%r12
3206
David Benjamin4969cc92016-04-22 15:02:23 -04003207 mulx 8*5($nptr),%rax,%r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08003208 adcx %rax,%r12
3209 adox %r14,%r13
3210
David Benjamin4969cc92016-04-22 15:02:23 -04003211 mulx 8*6($nptr),%rax,%r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08003212 adcx %rax,%r13
3213 adox %r15,%r14
3214
David Benjamin4969cc92016-04-22 15:02:23 -04003215 mulx 8*7($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003216 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3217 adcx %rax,%r14
3218 adox $carry,%r15
3219 mov %rbx,($tptr,%rcx,8) # save result
3220 mov %r8,%rbx
3221 adcx $carry,%r15 # cf=0
3222
3223 inc %rcx # of=0
3224 jnz .Lsqrx8x_tail
3225
3226 cmp 0+8(%rsp),$nptr # end of n[]?
3227 jae .Lsqrx8x_tail_done # break out of loop
3228
3229 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3230 mov 48+8(%rsp),%rdx # pull n0*a[0]
David Benjamin4969cc92016-04-22 15:02:23 -04003231 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003232 adc 8*0($tptr),%r8
3233 adc 8*1($tptr),%r9
3234 adc 8*2($tptr),%r10
3235 adc 8*3($tptr),%r11
3236 adc 8*4($tptr),%r12
3237 adc 8*5($tptr),%r13
3238 adc 8*6($tptr),%r14
3239 adc 8*7($tptr),%r15
3240 lea 8*8($tptr),$tptr
3241 sbb %rax,%rax
3242 sub \$8,%rcx # mov \$-8,%rcx
3243
3244 xor $carry,$carry # of=0, cf=0
3245 mov %rax,16+8(%rsp)
3246 jmp .Lsqrx8x_tail
3247
3248.align 32
3249.Lsqrx8x_tail_done:
Robert Sloan4d1ac502017-02-06 08:36:14 -08003250 xor %rax,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08003251	add	24+8(%rsp),%r8	# can this overflow? carry ends up in %rax below
Adam Langley4139edb2016-01-13 15:00:54 -08003252 adc \$0,%r9
3253 adc \$0,%r10
3254 adc \$0,%r11
3255 adc \$0,%r12
3256 adc \$0,%r13
3257 adc \$0,%r14
Robert Sloan4d1ac502017-02-06 08:36:14 -08003258 adc \$0,%r15
3259 adc \$0,%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08003260
3261 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3262.Lsqrx8x_no_tail: # %cf is 0 if jumped here
3263 adc 8*0($tptr),%r8
3264 movq %xmm3,%rcx
3265 adc 8*1($tptr),%r9
David Benjamin4969cc92016-04-22 15:02:23 -04003266 mov 8*7($nptr),$carry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003267 movq %xmm2,$nptr # restore $nptr
3268 adc 8*2($tptr),%r10
3269 adc 8*3($tptr),%r11
3270 adc 8*4($tptr),%r12
3271 adc 8*5($tptr),%r13
3272 adc 8*6($tptr),%r14
3273 adc 8*7($tptr),%r15
Robert Sloan4d1ac502017-02-06 08:36:14 -08003274 adc \$0,%rax # top-most carry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003275
3276 mov 32+8(%rsp),%rbx # n0
3277 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3278
3279 mov %r8,8*0($tptr) # store top 512 bits
3280 lea 8*8($tptr),%r8 # borrow %r8
3281 mov %r9,8*1($tptr)
3282 mov %r10,8*2($tptr)
3283 mov %r11,8*3($tptr)
3284 mov %r12,8*4($tptr)
3285 mov %r13,8*5($tptr)
3286 mov %r14,8*6($tptr)
3287 mov %r15,8*7($tptr)
3288
3289 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3290 cmp 8+8(%rsp),%r8 # end of t[]?
3291 jb .Lsqrx8x_reduction_loop
David Benjamin4969cc92016-04-22 15:02:23 -04003292 ret
3293.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08003294___
3295}
3296##############################################################
3297# Post-condition, 4x unrolled
3298#
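# Same branch-free conditional subtraction as __bn_post4x_internal above,
# except that the BMI1 ANDN instruction fuses the 'not' and 'and' of the
# borrow mask with the modulus words into a single step.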
3299{
3300my ($rptr,$nptr)=("%rdx","%rbp");
Adam Langleyd9e397b2015-01-22 14:27:53 -08003301$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04003302.align 32
3303__bn_postx4x_internal:
3304 mov 8*0($nptr),%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003305 mov %rcx,%r10 # -$num
Adam Langleyd9e397b2015-01-22 14:27:53 -08003306 mov %rcx,%r9 # -$num
David Benjamin4969cc92016-04-22 15:02:23 -04003307 neg %rax
3308 sar \$3+2,%rcx
Adam Langleyd9e397b2015-01-22 14:27:53 -08003309 #lea 48+8(%rsp,%r9),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003310 movq %xmm1,$rptr # restore $rptr
3311 movq %xmm1,$aptr # prepare for back-to-back call
David Benjamin4969cc92016-04-22 15:02:23 -04003312 dec %r12 # so that after 'not' we get -n[0]
3313 mov 8*1($nptr),%r13
3314 xor %r8,%r8
3315 mov 8*2($nptr),%r14
3316 mov 8*3($nptr),%r15
3317 jmp .Lsqrx4x_sub_entry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003318
David Benjamin4969cc92016-04-22 15:02:23 -04003319.align 16
Adam Langleyd9e397b2015-01-22 14:27:53 -08003320.Lsqrx4x_sub:
David Benjamin4969cc92016-04-22 15:02:23 -04003321 mov 8*0($nptr),%r12
3322 mov 8*1($nptr),%r13
3323 mov 8*2($nptr),%r14
3324 mov 8*3($nptr),%r15
3325.Lsqrx4x_sub_entry:
3326 andn %rax,%r12,%r12
3327 lea 8*4($nptr),$nptr
3328 andn %rax,%r13,%r13
3329 andn %rax,%r14,%r14
3330 andn %rax,%r15,%r15
3331
3332 neg %r8 # mov %r8,%cf
3333 adc 8*0($tptr),%r12
3334 adc 8*1($tptr),%r13
3335 adc 8*2($tptr),%r14
3336 adc 8*3($tptr),%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003337 mov %r12,8*0($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003338 lea 8*4($tptr),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003339 mov %r13,8*1($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003340 sbb %r8,%r8 # mov %cf,%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08003341 mov %r14,8*2($rptr)
3342 mov %r15,8*3($rptr)
3343 lea 8*4($rptr),$rptr
3344
3345 inc %rcx
3346 jnz .Lsqrx4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04003347
Adam Langleyd9e397b2015-01-22 14:27:53 -08003348 neg %r9 # restore $num
3349
3350 ret
David Benjamin4969cc92016-04-22 15:02:23 -04003351.size __bn_postx4x_internal,.-__bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08003352___
David Benjamin4969cc92016-04-22 15:02:23 -04003353}
Adam Langleyd9e397b2015-01-22 14:27:53 -08003354}}}
3355{
3356my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3357 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3358my $out=$inp;
3359my $STRIDE=2**5*8;
3360my $N=$STRIDE/4;
3361
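# bn_scatter5 stores a num-word value into the powers table with a stride
# of 32 words (256 bytes), so word j of power idx ends up at
# tbl[32*j + idx]; bn_gather5 reads it back by scanning all 32 slots with
# the SSE2 mask technique, so the access pattern is independent of idx.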
3362$code.=<<___;
3363.globl bn_scatter5
3364.type bn_scatter5,\@abi-omnipotent
3365.align 16
3366bn_scatter5:
3367 cmp \$0, $num
3368 jz .Lscatter_epilogue
3369 lea ($tbl,$idx,8),$tbl
3370.Lscatter:
3371 mov ($inp),%rax
3372 lea 8($inp),$inp
3373 mov %rax,($tbl)
3374 lea 32*8($tbl),$tbl
3375 sub \$1,$num
3376 jnz .Lscatter
3377.Lscatter_epilogue:
3378 ret
3379.size bn_scatter5,.-bn_scatter5
3380
3381.globl bn_gather5
3382.type bn_gather5,\@abi-omnipotent
David Benjamin4969cc92016-04-22 15:02:23 -04003383.align 32
Adam Langleyd9e397b2015-01-22 14:27:53 -08003384bn_gather5:
David Benjamin4969cc92016-04-22 15:02:23 -04003385.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
Adam Langleyd9e397b2015-01-22 14:27:53 -08003386 # I can't trust assembler to use specific encoding:-(
David Benjamin4969cc92016-04-22 15:02:23 -04003387 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3388 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3389 lea .Linc(%rip),%rax
3390 and \$-16,%rsp # shouldn't be formally required
3391
3392 movd $idx,%xmm5
3393 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3394 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3395 lea 128($tbl),%r11 # size optimization
3396 lea 128(%rsp),%rax # size optimization
3397
3398 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3399 movdqa %xmm1,%xmm4
3400 movdqa %xmm1,%xmm2
3401___
3402########################################################################
3403# calculate mask by comparing 0..31 to $idx and save result to stack
3404#
3405for($i=0;$i<$STRIDE/16;$i+=4) {
3406$code.=<<___;
3407 paddd %xmm0,%xmm1
3408 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3409___
3410$code.=<<___ if ($i);
3411 movdqa %xmm3,`16*($i-1)-128`(%rax)
Adam Langleyd9e397b2015-01-22 14:27:53 -08003412___
3413$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04003414 movdqa %xmm4,%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -08003415
David Benjamin4969cc92016-04-22 15:02:23 -04003416 paddd %xmm1,%xmm2
3417 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3418 movdqa %xmm0,`16*($i+0)-128`(%rax)
3419 movdqa %xmm4,%xmm0
3420
3421 paddd %xmm2,%xmm3
3422 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3423 movdqa %xmm1,`16*($i+1)-128`(%rax)
3424 movdqa %xmm4,%xmm1
3425
3426 paddd %xmm3,%xmm0
3427 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3428 movdqa %xmm2,`16*($i+2)-128`(%rax)
3429 movdqa %xmm4,%xmm2
3430___
3431}
3432$code.=<<___;
3433 movdqa %xmm3,`16*($i-1)-128`(%rax)
3434 jmp .Lgather
3435
3436.align 32
3437.Lgather:
3438 pxor %xmm4,%xmm4
3439 pxor %xmm5,%xmm5
3440___
3441for($i=0;$i<$STRIDE/16;$i+=4) {
3442$code.=<<___;
3443 movdqa `16*($i+0)-128`(%r11),%xmm0
3444 movdqa `16*($i+1)-128`(%r11),%xmm1
3445 movdqa `16*($i+2)-128`(%r11),%xmm2
3446 pand `16*($i+0)-128`(%rax),%xmm0
3447 movdqa `16*($i+3)-128`(%r11),%xmm3
3448 pand `16*($i+1)-128`(%rax),%xmm1
3449 por %xmm0,%xmm4
3450 pand `16*($i+2)-128`(%rax),%xmm2
3451 por %xmm1,%xmm5
3452 pand `16*($i+3)-128`(%rax),%xmm3
3453 por %xmm2,%xmm4
3454 por %xmm3,%xmm5
3455___
3456}
3457$code.=<<___;
3458 por %xmm5,%xmm4
3459 lea $STRIDE(%r11),%r11
3460 pshufd \$0x4e,%xmm4,%xmm0
3461 por %xmm4,%xmm0
Adam Langleyd9e397b2015-01-22 14:27:53 -08003462 movq %xmm0,($out) # m0=bp[0]
3463 lea 8($out),$out
3464 sub \$1,$num
3465 jnz .Lgather
David Benjamin4969cc92016-04-22 15:02:23 -04003466
3467 lea (%r10),%rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08003468 ret
3469.LSEH_end_bn_gather5:
3470.size bn_gather5,.-bn_gather5
3471___
3472}
3473$code.=<<___;
3474.align 64
David Benjamin4969cc92016-04-22 15:02:23 -04003475.Linc:
3476 .long 0,0, 1,1
3477 .long 2,2, 2,2
Adam Langleyd9e397b2015-01-22 14:27:53 -08003478.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3479___
3480
3481# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3482# CONTEXT *context,DISPATCHER_CONTEXT *disp)
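# mul_handler services exceptions raised inside any of the bn_* routines
# above: if the fault lies between prologue and epilogue it recovers the
# original stack pointer the routine saved (from 40(%rax), or via the
# $num slot for the small bn_mul_mont_gather5 path), restores the
# non-volatile registers from that frame and then lets the normal Win64
# unwind proceed.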
3483if ($win64) {
3484$rec="%rcx";
3485$frame="%rdx";
3486$context="%r8";
3487$disp="%r9";
3488
3489$code.=<<___;
3490.extern __imp_RtlVirtualUnwind
3491.type mul_handler,\@abi-omnipotent
3492.align 16
3493mul_handler:
3494 push %rsi
3495 push %rdi
3496 push %rbx
3497 push %rbp
3498 push %r12
3499 push %r13
3500 push %r14
3501 push %r15
3502 pushfq
3503 sub \$64,%rsp
3504
3505 mov 120($context),%rax # pull context->Rax
3506 mov 248($context),%rbx # pull context->Rip
3507
3508 mov 8($disp),%rsi # disp->ImageBase
3509 mov 56($disp),%r11 # disp->HandlerData
3510
3511 mov 0(%r11),%r10d # HandlerData[0]
3512 lea (%rsi,%r10),%r10 # end of prologue label
3513 cmp %r10,%rbx # context->Rip<end of prologue label
3514 jb .Lcommon_seh_tail
3515
3516 mov 152($context),%rax # pull context->Rsp
3517
3518 mov 4(%r11),%r10d # HandlerData[1]
3519 lea (%rsi,%r10),%r10 # epilogue label
3520 cmp %r10,%rbx # context->Rip>=epilogue label
3521 jae .Lcommon_seh_tail
3522
3523 lea .Lmul_epilogue(%rip),%r10
3524 cmp %r10,%rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003525 ja .Lbody_40
Adam Langleyd9e397b2015-01-22 14:27:53 -08003526
3527 mov 192($context),%r10 # pull $num
3528 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
David Benjamin4969cc92016-04-22 15:02:23 -04003529
Adam Langleyd9e397b2015-01-22 14:27:53 -08003530 jmp .Lbody_proceed
3531
3532.Lbody_40:
3533 mov 40(%rax),%rax # pull saved stack pointer
3534.Lbody_proceed:
Adam Langleyd9e397b2015-01-22 14:27:53 -08003535 mov -8(%rax),%rbx
3536 mov -16(%rax),%rbp
3537 mov -24(%rax),%r12
3538 mov -32(%rax),%r13
3539 mov -40(%rax),%r14
3540 mov -48(%rax),%r15
3541 mov %rbx,144($context) # restore context->Rbx
3542 mov %rbp,160($context) # restore context->Rbp
3543 mov %r12,216($context) # restore context->R12
3544 mov %r13,224($context) # restore context->R13
3545 mov %r14,232($context) # restore context->R14
3546 mov %r15,240($context) # restore context->R15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003547
3548.Lcommon_seh_tail:
3549 mov 8(%rax),%rdi
3550 mov 16(%rax),%rsi
3551 mov %rax,152($context) # restore context->Rsp
3552 mov %rsi,168($context) # restore context->Rsi
3553 mov %rdi,176($context) # restore context->Rdi
3554
3555 mov 40($disp),%rdi # disp->ContextRecord
3556 mov $context,%rsi # context
3557 mov \$154,%ecx # sizeof(CONTEXT)
3558 .long 0xa548f3fc # cld; rep movsq
3559
3560 mov $disp,%rsi
3561 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3562 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3563 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3564 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3565 mov 40(%rsi),%r10 # disp->ContextRecord
3566 lea 56(%rsi),%r11 # &disp->HandlerData
3567 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3568 mov %r10,32(%rsp) # arg5
3569 mov %r11,40(%rsp) # arg6
3570 mov %r12,48(%rsp) # arg7
3571 mov %rcx,56(%rsp) # arg8, (NULL)
3572 call *__imp_RtlVirtualUnwind(%rip)
3573
3574 mov \$1,%eax # ExceptionContinueSearch
3575 add \$64,%rsp
3576 popfq
3577 pop %r15
3578 pop %r14
3579 pop %r13
3580 pop %r12
3581 pop %rbp
3582 pop %rbx
3583 pop %rdi
3584 pop %rsi
3585 ret
3586.size mul_handler,.-mul_handler
3587
3588.section .pdata
3589.align 4
3590 .rva .LSEH_begin_bn_mul_mont_gather5
3591 .rva .LSEH_end_bn_mul_mont_gather5
3592 .rva .LSEH_info_bn_mul_mont_gather5
3593
3594 .rva .LSEH_begin_bn_mul4x_mont_gather5
3595 .rva .LSEH_end_bn_mul4x_mont_gather5
3596 .rva .LSEH_info_bn_mul4x_mont_gather5
3597
3598 .rva .LSEH_begin_bn_power5
3599 .rva .LSEH_end_bn_power5
3600 .rva .LSEH_info_bn_power5
3601
3602 .rva .LSEH_begin_bn_from_mont8x
3603 .rva .LSEH_end_bn_from_mont8x
3604 .rva .LSEH_info_bn_from_mont8x
3605___
3606$code.=<<___ if ($addx);
3607 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3608 .rva .LSEH_end_bn_mulx4x_mont_gather5
3609 .rva .LSEH_info_bn_mulx4x_mont_gather5
3610
3611 .rva .LSEH_begin_bn_powerx5
3612 .rva .LSEH_end_bn_powerx5
3613 .rva .LSEH_info_bn_powerx5
3614___
3615$code.=<<___;
3616 .rva .LSEH_begin_bn_gather5
3617 .rva .LSEH_end_bn_gather5
3618 .rva .LSEH_info_bn_gather5
3619
3620.section .xdata
3621.align 8
3622.LSEH_info_bn_mul_mont_gather5:
3623 .byte 9,0,0,0
3624 .rva mul_handler
3625 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3626.align 8
3627.LSEH_info_bn_mul4x_mont_gather5:
3628 .byte 9,0,0,0
3629 .rva mul_handler
3630 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3631.align 8
3632.LSEH_info_bn_power5:
3633 .byte 9,0,0,0
3634 .rva mul_handler
3635 .rva .Lpower5_body,.Lpower5_epilogue # HandlerData[]
3636.align 8
3637.LSEH_info_bn_from_mont8x:
3638 .byte 9,0,0,0
3639 .rva mul_handler
3640 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3641___
3642$code.=<<___ if ($addx);
3643.align 8
3644.LSEH_info_bn_mulx4x_mont_gather5:
3645 .byte 9,0,0,0
3646 .rva mul_handler
3647 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3648.align 8
3649.LSEH_info_bn_powerx5:
3650 .byte 9,0,0,0
3651 .rva mul_handler
3652 .rva .Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3653___
3654$code.=<<___;
3655.align 8
3656.LSEH_info_bn_gather5:
David Benjamin4969cc92016-04-22 15:02:23 -04003657 .byte 0x01,0x0b,0x03,0x0a
3658 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3659 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
Adam Langleyd9e397b2015-01-22 14:27:53 -08003660.align 8
3661___
3662}
3663
3664$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3665
3666print $code;
3667close STDOUT;