1#!/usr/bin/env perl
2
3# ====================================================================
4# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5# project. The module is, however, dual licensed under OpenSSL and
6# CRYPTOGAMS licenses depending on where you obtain it. For further
7# details see http://www.openssl.org/~appro/cryptogams/.
8# ====================================================================
9
10# August 2011.
11#
12# Companion to x86_64-mont.pl that optimizes cache-timing attack
13# countermeasures. The subroutines are produced by replacing bp[i]
14# references in their x86_64-mont.pl counterparts with cache-neutral
15# references to powers table computed in BN_mod_exp_mont_consttime.
16# In addition, a subroutine that scatters elements of the powers table
17# is implemented, so that scattering/gathering can be tuned without
18# modifying bn_exp.c.
19
20# August 2013.
21#
22# Add MULX/AD*X code paths and additional interfaces to optimize for the
23# branch prediction unit. For input lengths that are multiples of 8,
24# the np argument is not just the modulus value, but one interleaved
25# with 0. This is to optimize the post-condition...
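
# The cache-neutral gather used throughout this file can be pictured with the
# following Perl sketch. It is illustrative only and never called by this
# script (the helper name is ours): every one of the 2^5 table entries is read
# and masked, so the sequence of memory accesses is independent of the secret
# index.
sub _gather_sketch {
	my ($table, $idx, $num) = @_;	# $table->[$i][$j] = word $j of power $i
	my @out = (0) x $num;
	for my $i (0 .. 31) {
		my $mask = ($i == $idx) ? ~0 : 0;	# real code derives this without branching
		for my $j (0 .. $num - 1) {
			$out[$j] |= $table->[$i][$j] & $mask;
		}
	}
	return \@out;
}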
26
27$flavour = shift;
28$output = shift;
29if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
30
31$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
32
33$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
34( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
35( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
36die "can't locate x86_64-xlate.pl";
37
38open OUT,"| \"$^X\" $xlate $flavour $output";
39*STDOUT=*OUT;
40
41# In upstream, this is controlled by shelling out to the compiler to check
42# versions, but BoringSSL is intended to be used with pre-generated perlasm
43# output, so this isn't useful anyway.
44#
45# TODO(davidben): Enable this after testing. $addx goes up to 1.
46$addx = 0;
47
48# int bn_mul_mont_gather5(
49$rp="%rdi"; # BN_ULONG *rp,
50$ap="%rsi"; # const BN_ULONG *ap,
51$bp="%rdx"; # const BN_ULONG *bp,
52$np="%rcx"; # const BN_ULONG *np,
53$n0="%r8"; # const BN_ULONG *n0,
54$num="%r9"; # int num,
55			# int idx);	# 0 to 2^5-1, "index" in $bp holding
56			#		# pre-computed powers of a', interlaced
57			#		# in such a manner that b[0] is $bp[idx],
58			#		# b[1] is $bp[2^5+idx], etc.
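
# As a reference for the layout described above: word $j of power $i lives at
# byte offset 8*($j*2^5 + $i) from $bp. A small sketch of that mapping (not
# used by this script):
sub _power_table_offset_sketch {
	my ($i, $j) = @_;		# $i: power index 0..31, $j: word index
	return 8*($j*32 + $i);		# consecutive words of one power are 32*8 bytes apart
}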
59$lo0="%r10";
60$hi0="%r11";
61$hi1="%r13";
62$i="%r14";
63$j="%r15";
64$m0="%rbx";
65$m1="%rbp";
66
67$code=<<___;
68.text
69
70.extern OPENSSL_ia32cap_P
71
72.globl bn_mul_mont_gather5
73.type bn_mul_mont_gather5,\@function,6
74.align 64
75bn_mul_mont_gather5:
76 test \$7,${num}d
77 jnz .Lmul_enter
78___
79$code.=<<___ if ($addx);
80 mov OPENSSL_ia32cap_P+8(%rip),%r11d
81___
82$code.=<<___;
83 jmp .Lmul4x_enter
84
85.align 16
86.Lmul_enter:
87 mov ${num}d,${num}d
88 mov %rsp,%rax
89	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
90 lea .Linc(%rip),%r10
91	push	%rbx
92 push %rbp
93 push %r12
94 push %r13
95 push %r14
96 push %r15
97
98	lea	2($num),%r11
99 neg %r11
100	lea	-264(%rsp,%r11,8),%rsp	# tp=alloca(8*(num+2)+256+8)
101	and	\$-1024,%rsp		# minimize TLB usage
102
103 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
104.Lmul_body:
105	lea	128($bp),%r12		# reassign $bp (+size optimization)
106___
107 $bp="%r12";
108 $STRIDE=2**5*8; # 5 is "window size"
109 $N=$STRIDE/4; # should match cache line size
110$code.=<<___;
111	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
112 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
113 lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
114 and \$-16,%r10
115
116	pshufd	\$0,%xmm5,%xmm5		# broadcast index
117 movdqa %xmm1,%xmm4
118 movdqa %xmm1,%xmm2
119___
120########################################################################
121# calculate mask by comparing 0..31 to index and save result to stack
122#
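# In scalar terms the loop below computes, for every value 0..31, a 128-bit
# lane-replicated mask that is all-ones when it equals the index and zero
# otherwise. A Perl sketch of the same idea (illustrative only; pcmpeqd does
# the comparison four lanes at a time):
sub _mask_table_sketch {
	my ($idx) = @_;
	return [ map { $_ == $idx ? 0xffffffff : 0 } 0 .. 31 ];
}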
123$code.=<<___;
124 paddd %xmm0,%xmm1
125 pcmpeqd %xmm5,%xmm0 # compare to 1,0
126 .byte 0x67
127 movdqa %xmm4,%xmm3
128___
129for($k=0;$k<$STRIDE/16-4;$k+=4) {
130$code.=<<___;
131 paddd %xmm1,%xmm2
132 pcmpeqd %xmm5,%xmm1 # compare to 3,2
133 movdqa %xmm0,`16*($k+0)+112`(%r10)
134 movdqa %xmm4,%xmm0
135
136 paddd %xmm2,%xmm3
137 pcmpeqd %xmm5,%xmm2 # compare to 5,4
138 movdqa %xmm1,`16*($k+1)+112`(%r10)
139 movdqa %xmm4,%xmm1
140
141 paddd %xmm3,%xmm0
142 pcmpeqd %xmm5,%xmm3 # compare to 7,6
143 movdqa %xmm2,`16*($k+2)+112`(%r10)
144 movdqa %xmm4,%xmm2
145
146 paddd %xmm0,%xmm1
147 pcmpeqd %xmm5,%xmm0
148 movdqa %xmm3,`16*($k+3)+112`(%r10)
149 movdqa %xmm4,%xmm3
150___
151}
152$code.=<<___; # last iteration can be optimized
153 paddd %xmm1,%xmm2
154 pcmpeqd %xmm5,%xmm1
155 movdqa %xmm0,`16*($k+0)+112`(%r10)
156
157 paddd %xmm2,%xmm3
158 .byte 0x67
159 pcmpeqd %xmm5,%xmm2
160 movdqa %xmm1,`16*($k+1)+112`(%r10)
161
162 pcmpeqd %xmm5,%xmm3
163 movdqa %xmm2,`16*($k+2)+112`(%r10)
164 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
165
166 pand `16*($k+1)-128`($bp),%xmm1
167 pand `16*($k+2)-128`($bp),%xmm2
168 movdqa %xmm3,`16*($k+3)+112`(%r10)
169 pand `16*($k+3)-128`($bp),%xmm3
170	por	%xmm2,%xmm0
171	por	%xmm3,%xmm1
172___
173for($k=0;$k<$STRIDE/16-4;$k+=4) {
174$code.=<<___;
175 movdqa `16*($k+0)-128`($bp),%xmm4
176 movdqa `16*($k+1)-128`($bp),%xmm5
177 movdqa `16*($k+2)-128`($bp),%xmm2
178 pand `16*($k+0)+112`(%r10),%xmm4
179 movdqa `16*($k+3)-128`($bp),%xmm3
180 pand `16*($k+1)+112`(%r10),%xmm5
181 por %xmm4,%xmm0
182 pand `16*($k+2)+112`(%r10),%xmm2
183 por %xmm5,%xmm1
184 pand `16*($k+3)+112`(%r10),%xmm3
185 por %xmm2,%xmm0
186 por %xmm3,%xmm1
187___
188}
189$code.=<<___;
190 por %xmm1,%xmm0
191 pshufd \$0x4e,%xmm0,%xmm1
192 por %xmm1,%xmm0
193	lea	$STRIDE($bp),$bp
194	movq	%xmm0,$m0		# m0=bp[0]
195
196 mov ($n0),$n0 # pull n0[0] value
197 mov ($ap),%rax
198
199 xor $i,$i # i=0
200 xor $j,$j # j=0
201
202	mov	$n0,$m1
203 mulq $m0 # ap[0]*bp[0]
204 mov %rax,$lo0
205 mov ($np),%rax
206
207	imulq	$lo0,$m1		# "tp[0]"*n0
208 mov %rdx,$hi0
209
210	mulq	$m1			# np[0]*m1
211 add %rax,$lo0 # discarded
212 mov 8($ap),%rax
213 adc \$0,%rdx
214 mov %rdx,$hi1
215
216 lea 1($j),$j # j++
217 jmp .L1st_enter
218
219.align 16
220.L1st:
221 add %rax,$hi1
222 mov ($ap,$j,8),%rax
223 adc \$0,%rdx
224 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
225 mov $lo0,$hi0
226 adc \$0,%rdx
227 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
228 mov %rdx,$hi1
229
230.L1st_enter:
231 mulq $m0 # ap[j]*bp[0]
232 add %rax,$hi0
233 mov ($np,$j,8),%rax
234 adc \$0,%rdx
235 lea 1($j),$j # j++
236 mov %rdx,$lo0
237
238 mulq $m1 # np[j]*m1
239 cmp $num,$j
240	jne	.L1st			# note that upon exit $j==$num, so
241 # they can be used interchangeably
242
243 add %rax,$hi1
244	adc	\$0,%rdx
245 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
246 adc \$0,%rdx
247	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
248	mov	%rdx,$hi1
249 mov $lo0,$hi0
250
251 xor %rdx,%rdx
252 add $hi0,$hi1
253 adc \$0,%rdx
254 mov $hi1,-8(%rsp,$num,8)
255 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
256
257 lea 1($i),$i # i++
258 jmp .Louter
259.align 16
260.Louter:
261	lea	24+128(%rsp,$num,8),%rdx	# where 256-byte mask is (+size optimization)
262 and \$-16,%rdx
263 pxor %xmm4,%xmm4
264 pxor %xmm5,%xmm5
265___
266for($k=0;$k<$STRIDE/16;$k+=4) {
267$code.=<<___;
268 movdqa `16*($k+0)-128`($bp),%xmm0
269 movdqa `16*($k+1)-128`($bp),%xmm1
270 movdqa `16*($k+2)-128`($bp),%xmm2
271 movdqa `16*($k+3)-128`($bp),%xmm3
272 pand `16*($k+0)-128`(%rdx),%xmm0
273 pand `16*($k+1)-128`(%rdx),%xmm1
274 por %xmm0,%xmm4
275 pand `16*($k+2)-128`(%rdx),%xmm2
276 por %xmm1,%xmm5
277 pand `16*($k+3)-128`(%rdx),%xmm3
278 por %xmm2,%xmm4
279 por %xmm3,%xmm5
280___
281}
282$code.=<<___;
283 por %xmm5,%xmm4
284 pshufd \$0x4e,%xmm4,%xmm0
285 por %xmm4,%xmm0
286 lea $STRIDE($bp),$bp
287
288 mov ($ap),%rax # ap[0]
289 movq %xmm0,$m0 # m0=bp[i]
290
291	xor	$j,$j			# j=0
292 mov $n0,$m1
293 mov (%rsp),$lo0
294
295	mulq	$m0			# ap[0]*bp[i]
296 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
297 mov ($np),%rax
298 adc \$0,%rdx
299
300	imulq	$lo0,$m1		# tp[0]*n0
301 mov %rdx,$hi0
302
303	mulq	$m1			# np[0]*m1
304 add %rax,$lo0 # discarded
305 mov 8($ap),%rax
306 adc \$0,%rdx
307 mov 8(%rsp),$lo0 # tp[1]
308 mov %rdx,$hi1
309
310 lea 1($j),$j # j++
311 jmp .Linner_enter
312
313.align 16
314.Linner:
315 add %rax,$hi1
316 mov ($ap,$j,8),%rax
317 adc \$0,%rdx
318 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
319 mov (%rsp,$j,8),$lo0
320 adc \$0,%rdx
321 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
322 mov %rdx,$hi1
323
324.Linner_enter:
325 mulq $m0 # ap[j]*bp[i]
326 add %rax,$hi0
327 mov ($np,$j,8),%rax
328 adc \$0,%rdx
329 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
330 mov %rdx,$hi0
331 adc \$0,$hi0
332 lea 1($j),$j # j++
333
334 mulq $m1 # np[j]*m1
335 cmp $num,$j
336	jne	.Linner			# note that upon exit $j==$num, so
337 # they can be used interchangeably
338	add	%rax,$hi1
339	adc	\$0,%rdx
340 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
341	mov	(%rsp,$num,8),$lo0
342	adc	\$0,%rdx
343	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
344	mov	%rdx,$hi1
345
346 xor %rdx,%rdx
347 add $hi0,$hi1
348 adc \$0,%rdx
349 add $lo0,$hi1 # pull upmost overflow bit
350 adc \$0,%rdx
351 mov $hi1,-8(%rsp,$num,8)
352 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
353
354 lea 1($i),$i # i++
355 cmp $num,$i
356 jb .Louter
357
358 xor $i,$i # i=0 and clear CF!
359 mov (%rsp),%rax # tp[0]
360 lea (%rsp),$ap # borrow ap for tp
361 mov $num,$j # j=num
362 jmp .Lsub
363.align 16
364.Lsub: sbb ($np,$i,8),%rax
365 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
366 mov 8($ap,$i,8),%rax # tp[i+1]
367 lea 1($i),$i # i++
368	dec	$j			# doesn't affect CF!
369 jnz .Lsub
370
371 sbb \$0,%rax # handle upmost overflow bit
372 xor $i,$i
373 mov $num,$j # j=num
374.align 16
375.Lcopy: # copy or in-place refresh
376 mov (%rsp,$i,8),$ap
377 mov ($rp,$i,8),$np
378 xor $np,$ap # conditional select:
379 and %rax,$ap # ((ap ^ np) & %rax) ^ np
380 xor $np,$ap # ap = borrow?tp:rp
381 mov $i,(%rsp,$i,8) # zap temporary vector
382 mov $ap,($rp,$i,8) # rp[i]=tp[i]
383 lea 1($i),$i
384 sub \$1,$j
385 jnz .Lcopy
386
387 mov 8(%rsp,$num,8),%rsi # restore %rsp
388 mov \$1,%rax
389
390	mov	-48(%rsi),%r15
391 mov -40(%rsi),%r14
392 mov -32(%rsi),%r13
393 mov -24(%rsi),%r12
394 mov -16(%rsi),%rbp
395 mov -8(%rsi),%rbx
396 lea (%rsi),%rsp
397.Lmul_epilogue:
398 ret
399.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
400___
401{{{
402my @A=("%r10","%r11");
403my @N=("%r13","%rdi");
404$code.=<<___;
405.type bn_mul4x_mont_gather5,\@function,6
406.align 32
407bn_mul4x_mont_gather5:
408.Lmul4x_enter:
409___
410$code.=<<___ if ($addx);
411	and	\$0x80108,%r11d
412 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
413	je	.Lmulx4x_enter
414___
415$code.=<<___;
416 .byte 0x67
417 mov %rsp,%rax
418 push %rbx
419 push %rbp
420 push %r12
421 push %r13
422 push %r14
423 push %r15
424
425	.byte	0x67
426	shl	\$3,${num}d		# convert $num to bytes
427 lea ($num,$num,2),%r10 # 3*$num in bytes
428	neg	$num			# -$num
429
430 ##############################################################
431	# Ensure that stack frame doesn't alias with $rptr+3*$num
432 # modulo 4096, which covers ret[num], am[num] and n[num]
433	# (see bn_exp.c). This is done to allow the memory disambiguation
434	# logic to do its magic. [Extra [num] is allocated in order
435	# to align with bn_power5's frame, which is cleansed after
436	# completing exponentiation. Extra 256 bytes is for the power mask
437	# calculated from the 7th argument, the index.]
438	#
439	lea	-320(%rsp,$num,2),%r11
440 sub $rp,%r11
441	and	\$4095,%r11
442 cmp %r11,%r10
443 jb .Lmul4xsp_alt
444	sub	%r11,%rsp		# align with $rp
445 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
446	jmp	.Lmul4xsp_done
447
448.align 32
449.Lmul4xsp_alt:
450	lea	4096-320(,$num,2),%r10
451 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
452	sub	%r10,%r11
453 mov \$0,%r10
454 cmovc %r10,%r11
455 sub %r11,%rsp
456.Lmul4xsp_done:
457 and \$-64,%rsp
458 neg $num
459
460 mov %rax,40(%rsp)
461.Lmul4x_body:
462
463 call mul4x_internal
464
465 mov 40(%rsp),%rsi # restore %rsp
466 mov \$1,%rax
467
468	mov	-48(%rsi),%r15
469 mov -40(%rsi),%r14
470 mov -32(%rsi),%r13
471 mov -24(%rsi),%r12
472 mov -16(%rsi),%rbp
473 mov -8(%rsi),%rbx
474 lea (%rsi),%rsp
475.Lmul4x_epilogue:
476 ret
477.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
478
479.type mul4x_internal,\@abi-omnipotent
480.align 32
481mul4x_internal:
482	shl	\$5,$num		# $num was in bytes
483 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
484 lea .Linc(%rip),%rax
485 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
486	shr	\$5,$num		# restore $num
487___
488 $bp="%r12";
489 $STRIDE=2**5*8; # 5 is "window size"
490 $N=$STRIDE/4; # should match cache line size
491 $tp=$i;
492$code.=<<___;
493	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
494 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
495 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
496 lea 128(%rdx),$bp # size optimization
497
498	pshufd	\$0,%xmm5,%xmm5		# broadcast index
499 movdqa %xmm1,%xmm4
500 .byte 0x67,0x67
501 movdqa %xmm1,%xmm2
502___
503########################################################################
504# calculate mask by comparing 0..31 to index and save result to stack
505#
506$code.=<<___;
507 paddd %xmm0,%xmm1
508 pcmpeqd %xmm5,%xmm0 # compare to 1,0
509	.byte	0x67
510	movdqa	%xmm4,%xmm3
511___
512for($i=0;$i<$STRIDE/16-4;$i+=4) {
513$code.=<<___;
514 paddd %xmm1,%xmm2
515 pcmpeqd %xmm5,%xmm1 # compare to 3,2
516 movdqa %xmm0,`16*($i+0)+112`(%r10)
517 movdqa %xmm4,%xmm0
518
519 paddd %xmm2,%xmm3
520 pcmpeqd %xmm5,%xmm2 # compare to 5,4
521 movdqa %xmm1,`16*($i+1)+112`(%r10)
522 movdqa %xmm4,%xmm1
523
524 paddd %xmm3,%xmm0
525 pcmpeqd %xmm5,%xmm3 # compare to 7,6
526 movdqa %xmm2,`16*($i+2)+112`(%r10)
527 movdqa %xmm4,%xmm2
528
529 paddd %xmm0,%xmm1
530 pcmpeqd %xmm5,%xmm0
531 movdqa %xmm3,`16*($i+3)+112`(%r10)
532 movdqa %xmm4,%xmm3
533___
534}
535$code.=<<___; # last iteration can be optimized
536 paddd %xmm1,%xmm2
537 pcmpeqd %xmm5,%xmm1
538 movdqa %xmm0,`16*($i+0)+112`(%r10)
539
540 paddd %xmm2,%xmm3
541	.byte	0x67
542	pcmpeqd	%xmm5,%xmm2
543 movdqa %xmm1,`16*($i+1)+112`(%r10)
544
545 pcmpeqd %xmm5,%xmm3
546 movdqa %xmm2,`16*($i+2)+112`(%r10)
547 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
548
549 pand `16*($i+1)-128`($bp),%xmm1
550 pand `16*($i+2)-128`($bp),%xmm2
551 movdqa %xmm3,`16*($i+3)+112`(%r10)
552 pand `16*($i+3)-128`($bp),%xmm3
553	por	%xmm2,%xmm0
554	por	%xmm3,%xmm1
555___
556for($i=0;$i<$STRIDE/16-4;$i+=4) {
557$code.=<<___;
558 movdqa `16*($i+0)-128`($bp),%xmm4
559 movdqa `16*($i+1)-128`($bp),%xmm5
560 movdqa `16*($i+2)-128`($bp),%xmm2
561 pand `16*($i+0)+112`(%r10),%xmm4
562 movdqa `16*($i+3)-128`($bp),%xmm3
563 pand `16*($i+1)+112`(%r10),%xmm5
564 por %xmm4,%xmm0
565 pand `16*($i+2)+112`(%r10),%xmm2
566 por %xmm5,%xmm1
567 pand `16*($i+3)+112`(%r10),%xmm3
568 por %xmm2,%xmm0
569 por %xmm3,%xmm1
570___
571}
572$code.=<<___;
573 por %xmm1,%xmm0
574 pshufd \$0x4e,%xmm0,%xmm1
575 por %xmm1,%xmm0
576 lea $STRIDE($bp),$bp
577	movq	%xmm0,$m0		# m0=bp[0]
578
579	mov	%r13,16+8(%rsp)		# save end of b[num]
580 mov $rp, 56+8(%rsp) # save $rp
581
582 mov ($n0),$n0 # pull n0[0] value
583 mov ($ap),%rax
584 lea ($ap,$num),$ap # end of a[num]
585 neg $num
586
587 mov $n0,$m1
588 mulq $m0 # ap[0]*bp[0]
589 mov %rax,$A[0]
590 mov ($np),%rax
591
592	imulq	$A[0],$m1		# "tp[0]"*n0
593	lea	64+8(%rsp),$tp
594	mov	%rdx,$A[1]
595
596	mulq	$m1			# np[0]*m1
597 add %rax,$A[0] # discarded
598 mov 8($ap,$num),%rax
599 adc \$0,%rdx
600 mov %rdx,$N[1]
601
602 mulq $m0
603 add %rax,$A[1]
604	mov	8*1($np),%rax
605	adc	\$0,%rdx
606 mov %rdx,$A[0]
607
608 mulq $m1
609 add %rax,$N[1]
610 mov 16($ap,$num),%rax
611 adc \$0,%rdx
612 add $A[1],$N[1]
613 lea 4*8($num),$j # j=4
614	lea	8*4($np),$np
615	adc	\$0,%rdx
616 mov $N[1],($tp)
617 mov %rdx,$N[0]
618 jmp .L1st4x
619
620.align 32
621.L1st4x:
622 mulq $m0 # ap[j]*bp[0]
623 add %rax,$A[0]
624	mov	-8*2($np),%rax
625	lea	32($tp),$tp
626 adc \$0,%rdx
627 mov %rdx,$A[1]
628
629 mulq $m1 # np[j]*m1
630 add %rax,$N[0]
631 mov -8($ap,$j),%rax
632 adc \$0,%rdx
633 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
634 adc \$0,%rdx
635 mov $N[0],-24($tp) # tp[j-1]
636 mov %rdx,$N[1]
637
638 mulq $m0 # ap[j]*bp[0]
639 add %rax,$A[1]
640	mov	-8*1($np),%rax
641	adc	\$0,%rdx
642 mov %rdx,$A[0]
643
644 mulq $m1 # np[j]*m1
645 add %rax,$N[1]
646 mov ($ap,$j),%rax
647 adc \$0,%rdx
648 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
649 adc \$0,%rdx
650 mov $N[1],-16($tp) # tp[j-1]
651 mov %rdx,$N[0]
652
653 mulq $m0 # ap[j]*bp[0]
654 add %rax,$A[0]
655	mov	8*0($np),%rax
656	adc	\$0,%rdx
657 mov %rdx,$A[1]
658
659 mulq $m1 # np[j]*m1
660 add %rax,$N[0]
661 mov 8($ap,$j),%rax
662 adc \$0,%rdx
663 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
664 adc \$0,%rdx
665 mov $N[0],-8($tp) # tp[j-1]
666 mov %rdx,$N[1]
667
668 mulq $m0 # ap[j]*bp[0]
669 add %rax,$A[1]
670	mov	8*1($np),%rax
671	adc	\$0,%rdx
672 mov %rdx,$A[0]
673
674 mulq $m1 # np[j]*m1
675 add %rax,$N[1]
676 mov 16($ap,$j),%rax
677 adc \$0,%rdx
678 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
679	lea	8*4($np),$np
680	adc	\$0,%rdx
681 mov $N[1],($tp) # tp[j-1]
682 mov %rdx,$N[0]
683
684 add \$32,$j # j+=4
685 jnz .L1st4x
686
687 mulq $m0 # ap[j]*bp[0]
688 add %rax,$A[0]
689	mov	-8*2($np),%rax
690	lea	32($tp),$tp
691 adc \$0,%rdx
692 mov %rdx,$A[1]
693
694 mulq $m1 # np[j]*m1
695 add %rax,$N[0]
696 mov -8($ap),%rax
697 adc \$0,%rdx
698 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
699 adc \$0,%rdx
700 mov $N[0],-24($tp) # tp[j-1]
701 mov %rdx,$N[1]
702
703 mulq $m0 # ap[j]*bp[0]
704 add %rax,$A[1]
705	mov	-8*1($np),%rax
706	adc	\$0,%rdx
707 mov %rdx,$A[0]
708
709 mulq $m1 # np[j]*m1
710 add %rax,$N[1]
711 mov ($ap,$num),%rax # ap[0]
712 adc \$0,%rdx
713 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
714 adc \$0,%rdx
715 mov $N[1],-16($tp) # tp[j-1]
716 mov %rdx,$N[0]
717
718	lea	($np,$num),$np		# rewind $np
719
720 xor $N[1],$N[1]
721 add $A[0],$N[0]
722 adc \$0,$N[1]
723 mov $N[0],-8($tp)
724
725 jmp .Louter4x
726
727.align 32
728.Louter4x:
729	lea	16+128($tp),%rdx	# where 256-byte mask is (+size optimization)
730 pxor %xmm4,%xmm4
731 pxor %xmm5,%xmm5
732___
733for($i=0;$i<$STRIDE/16;$i+=4) {
734$code.=<<___;
735 movdqa `16*($i+0)-128`($bp),%xmm0
736 movdqa `16*($i+1)-128`($bp),%xmm1
737 movdqa `16*($i+2)-128`($bp),%xmm2
738 movdqa `16*($i+3)-128`($bp),%xmm3
739 pand `16*($i+0)-128`(%rdx),%xmm0
740 pand `16*($i+1)-128`(%rdx),%xmm1
741 por %xmm0,%xmm4
742 pand `16*($i+2)-128`(%rdx),%xmm2
743 por %xmm1,%xmm5
744 pand `16*($i+3)-128`(%rdx),%xmm3
745 por %xmm2,%xmm4
746 por %xmm3,%xmm5
747___
748}
749$code.=<<___;
750 por %xmm5,%xmm4
751 pshufd \$0x4e,%xmm4,%xmm0
752 por %xmm4,%xmm0
753 lea $STRIDE($bp),$bp
754 movq %xmm0,$m0 # m0=bp[i]
755
756	mov	($tp,$num),$A[0]
757 mov $n0,$m1
758 mulq $m0 # ap[0]*bp[i]
759 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
760 mov ($np),%rax
761 adc \$0,%rdx
762
763	imulq	$A[0],$m1		# tp[0]*n0
764	mov	%rdx,$A[1]
765 mov $N[1],($tp) # store upmost overflow bit
766
767	lea	($tp,$num),$tp		# rewind $tp
768
769 mulq $m1 # np[0]*m1
770 add %rax,$A[0] # "$N[0]", discarded
771 mov 8($ap,$num),%rax
772 adc \$0,%rdx
773 mov %rdx,$N[1]
774
775 mulq $m0 # ap[j]*bp[i]
776 add %rax,$A[1]
777	mov	8*1($np),%rax
778	adc	\$0,%rdx
779 add 8($tp),$A[1] # +tp[1]
780 adc \$0,%rdx
781 mov %rdx,$A[0]
782
783 mulq $m1 # np[j]*m1
784 add %rax,$N[1]
785 mov 16($ap,$num),%rax
786 adc \$0,%rdx
787 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
788 lea 4*8($num),$j # j=4
789	lea	8*4($np),$np
790	adc	\$0,%rdx
791 mov %rdx,$N[0]
792 jmp .Linner4x
793
794.align 32
795.Linner4x:
796 mulq $m0 # ap[j]*bp[i]
797 add %rax,$A[0]
798	mov	-8*2($np),%rax
799	adc	\$0,%rdx
800 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
801 lea 32($tp),$tp
802 adc \$0,%rdx
803 mov %rdx,$A[1]
804
805 mulq $m1 # np[j]*m1
806 add %rax,$N[0]
807 mov -8($ap,$j),%rax
808 adc \$0,%rdx
809 add $A[0],$N[0]
810 adc \$0,%rdx
811 mov $N[1],-32($tp) # tp[j-1]
812 mov %rdx,$N[1]
813
814 mulq $m0 # ap[j]*bp[i]
815 add %rax,$A[1]
816	mov	-8*1($np),%rax
817	adc	\$0,%rdx
818 add -8($tp),$A[1]
819 adc \$0,%rdx
820 mov %rdx,$A[0]
821
822 mulq $m1 # np[j]*m1
823 add %rax,$N[1]
824 mov ($ap,$j),%rax
825 adc \$0,%rdx
826 add $A[1],$N[1]
827 adc \$0,%rdx
828 mov $N[0],-24($tp) # tp[j-1]
829 mov %rdx,$N[0]
830
831 mulq $m0 # ap[j]*bp[i]
832 add %rax,$A[0]
833	mov	8*0($np),%rax
834	adc	\$0,%rdx
835 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
836 adc \$0,%rdx
837 mov %rdx,$A[1]
838
839 mulq $m1 # np[j]*m1
840 add %rax,$N[0]
841 mov 8($ap,$j),%rax
842 adc \$0,%rdx
843 add $A[0],$N[0]
844 adc \$0,%rdx
845 mov $N[1],-16($tp) # tp[j-1]
846 mov %rdx,$N[1]
847
848 mulq $m0 # ap[j]*bp[i]
849 add %rax,$A[1]
850	mov	8*1($np),%rax
851	adc	\$0,%rdx
852 add 8($tp),$A[1]
853 adc \$0,%rdx
854 mov %rdx,$A[0]
855
856 mulq $m1 # np[j]*m1
857 add %rax,$N[1]
858 mov 16($ap,$j),%rax
859 adc \$0,%rdx
860 add $A[1],$N[1]
861	lea	8*4($np),$np
862	adc	\$0,%rdx
863 mov $N[0],-8($tp) # tp[j-1]
864 mov %rdx,$N[0]
865
866 add \$32,$j # j+=4
867 jnz .Linner4x
868
869 mulq $m0 # ap[j]*bp[i]
870 add %rax,$A[0]
871	mov	-8*2($np),%rax
872	adc	\$0,%rdx
873 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
874 lea 32($tp),$tp
875 adc \$0,%rdx
876 mov %rdx,$A[1]
877
878 mulq $m1 # np[j]*m1
879 add %rax,$N[0]
880 mov -8($ap),%rax
881 adc \$0,%rdx
882 add $A[0],$N[0]
883 adc \$0,%rdx
884 mov $N[1],-32($tp) # tp[j-1]
885 mov %rdx,$N[1]
886
887 mulq $m0 # ap[j]*bp[i]
888 add %rax,$A[1]
889 mov $m1,%rax
890	mov	-8*1($np),$m1
891	adc	\$0,%rdx
892 add -8($tp),$A[1]
893 adc \$0,%rdx
894 mov %rdx,$A[0]
895
896 mulq $m1 # np[j]*m1
897 add %rax,$N[1]
898 mov ($ap,$num),%rax # ap[0]
899 adc \$0,%rdx
900 add $A[1],$N[1]
901 adc \$0,%rdx
902 mov $N[0],-24($tp) # tp[j-1]
903 mov %rdx,$N[0]
904
905	mov	$N[1],-16($tp)		# tp[j-1]
906	lea	($np,$num),$np		# rewind $np
907
908 xor $N[1],$N[1]
909 add $A[0],$N[0]
910 adc \$0,$N[1]
911 add ($tp),$N[0] # pull upmost overflow bit
912 adc \$0,$N[1] # upmost overflow bit
913 mov $N[0],-8($tp)
914
915 cmp 16+8(%rsp),$bp
916 jb .Louter4x
917___
918if (1) {
919$code.=<<___;
920	xor	%rax,%rax
921	sub	$N[0],$m1		# compare top-most words
922 adc $j,$j # $j is zero
923 or $j,$N[1]
924	sub	$N[1],%rax		# %rax=-$N[1]
925	lea	($tp,$num),%rbx		# tptr in .sqr4x_sub
926	mov	($np),%r12
927 lea ($np),%rbp # nptr in .sqr4x_sub
928	mov	%r9,%rcx
929	sar	\$3+2,%rcx
930	mov	56+8(%rsp),%rdi		# rptr in .sqr4x_sub
931	dec	%r12			# so that after 'not' we get -n[0]
932 xor %r10,%r10
933 mov 8*1(%rbp),%r13
934 mov 8*2(%rbp),%r14
935 mov 8*3(%rbp),%r15
936 jmp .Lsqr4x_sub_entry
937___
938} else {
939my @ri=("%rax",$bp,$m0,$m1);
940my $rp="%rdx";
941$code.=<<___
942 xor \$1,$N[1]
943 lea ($tp,$num),$tp # rewind $tp
944 sar \$5,$num # cf=0
945 lea ($np,$N[1],8),$np
946 mov 56+8(%rsp),$rp # restore $rp
947 jmp .Lsub4x
948
949.align 32
950.Lsub4x:
951 .byte 0x66
952 mov 8*0($tp),@ri[0]
953 mov 8*1($tp),@ri[1]
954 .byte 0x66
955 sbb 16*0($np),@ri[0]
956 mov 8*2($tp),@ri[2]
957 sbb 16*1($np),@ri[1]
958 mov 3*8($tp),@ri[3]
959 lea 4*8($tp),$tp
960 sbb 16*2($np),@ri[2]
961 mov @ri[0],8*0($rp)
962 sbb 16*3($np),@ri[3]
963 lea 16*4($np),$np
964 mov @ri[1],8*1($rp)
965 mov @ri[2],8*2($rp)
966 mov @ri[3],8*3($rp)
967 lea 8*4($rp),$rp
968
969 inc $num
970 jnz .Lsub4x
971
972 ret
973___
974}
975$code.=<<___;
976.size mul4x_internal,.-mul4x_internal
977___
978}}}
979 {{{
980######################################################################
981# void bn_power5(
982my $rptr="%rdi"; # BN_ULONG *rptr,
983my $aptr="%rsi"; # const BN_ULONG *aptr,
984my $bptr="%rdx"; # const void *table,
985my $nptr="%rcx"; # const BN_ULONG *nptr,
986my $n0 ="%r8"; # const BN_ULONG *n0);
987my $num ="%r9"; # int num, has to be divisible by 8
988 # int pwr
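# In terms of plain modular arithmetic, one bn_power5 call performs five
# squarings followed by a multiplication with the table entry selected by
# "pwr", i.e. one step of a fixed 5-bit-window exponentiation. A Perl sketch
# with small toy integers (the real code works on num-word values kept in
# Montgomery form; the helper name is ours and the sub is never called):
sub _power5_step_sketch {
	my ($acc, $a_pow, $n) = @_;		# $a_pow = gathered power of a, mod $n
	$acc = ($acc * $acc) % $n for 1 .. 5;	# acc = acc^(2^5) mod n
	return ($acc * $a_pow) % $n;		# then multiply by the gathered power
}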
989
990my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
991my @A0=("%r10","%r11");
992my @A1=("%r12","%r13");
993my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
994
995$code.=<<___;
996.globl bn_power5
997.type bn_power5,\@function,6
998.align 32
999bn_power5:
1000___
1001$code.=<<___ if ($addx);
1002 mov OPENSSL_ia32cap_P+8(%rip),%r11d
1003	and	\$0x80108,%r11d
1004 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
1005	je	.Lpowerx5_enter
1006___
1007$code.=<<___;
1008 mov %rsp,%rax
1009 push %rbx
1010 push %rbp
1011 push %r12
1012 push %r13
1013 push %r14
1014 push %r15
1015
1016	shl	\$3,${num}d		# convert $num to bytes
1017	lea	($num,$num,2),%r10d	# 3*$num
1018	neg	$num
1019 mov ($n0),$n0 # *n0
1020
1021 ##############################################################
1022	# Ensure that stack frame doesn't alias with $rptr+3*$num
1023 # modulo 4096, which covers ret[num], am[num] and n[num]
1024	# (see bn_exp.c). This is done to allow the memory disambiguation
1025	# logic to do its magic. [Extra 256 bytes is for the power mask
1026	# calculated from the 7th argument, the index.]
1027	#
1028	lea	-320(%rsp,$num,2),%r11
1029 sub $rptr,%r11
1030	and	\$4095,%r11
1031 cmp %r11,%r10
1032 jb .Lpwr_sp_alt
1033 sub %r11,%rsp # align with $aptr
1034	lea	-320(%rsp,$num,2),%rsp	# alloca(frame+2*num*8+256)
1035	jmp	.Lpwr_sp_done
1036
1037.align 32
1038.Lpwr_sp_alt:
1039	lea	4096-320(,$num,2),%r10
1040 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1041	sub	%r10,%r11
1042 mov \$0,%r10
1043 cmovc %r10,%r11
1044 sub %r11,%rsp
1045.Lpwr_sp_done:
1046 and \$-64,%rsp
1047 mov $num,%r10
1048 neg $num
1049
1050 ##############################################################
1051 # Stack layout
1052 #
1053 # +0 saved $num, used in reduction section
1054 # +8 &t[2*$num], used in reduction section
1055 # +32 saved *n0
1056 # +40 saved %rsp
1057 # +48 t[2*$num]
1058 #
1059 mov $n0, 32(%rsp)
1060 mov %rax, 40(%rsp) # save original %rsp
1061.Lpower5_body:
1062	movq	$rptr,%xmm1		# save $rptr, used in sqr8x
1063	movq	$nptr,%xmm2		# save $nptr
1064	movq	%r10, %xmm3		# -$num, used in sqr8x
1065	movq	$bptr,%xmm4
1066
1067 call __bn_sqr8x_internal
1068	call	__bn_post4x_internal
1069	call	__bn_sqr8x_internal
1070	call	__bn_post4x_internal
1071	call	__bn_sqr8x_internal
1072	call	__bn_post4x_internal
1073	call	__bn_sqr8x_internal
1074	call	__bn_post4x_internal
1075	call	__bn_sqr8x_internal
1076	call	__bn_post4x_internal
1077
1078 movq %xmm2,$nptr
1079 movq %xmm4,$bptr
1080 mov $aptr,$rptr
1081 mov 40(%rsp),%rax
1082 lea 32(%rsp),$n0
1083
1084 call mul4x_internal
1085
1086 mov 40(%rsp),%rsi # restore %rsp
1087 mov \$1,%rax
1088 mov -48(%rsi),%r15
1089 mov -40(%rsi),%r14
1090 mov -32(%rsi),%r13
1091 mov -24(%rsi),%r12
1092 mov -16(%rsi),%rbp
1093 mov -8(%rsi),%rbx
1094 lea (%rsi),%rsp
1095.Lpower5_epilogue:
1096 ret
1097.size bn_power5,.-bn_power5
1098
1099.globl bn_sqr8x_internal
1100.hidden bn_sqr8x_internal
1101.type bn_sqr8x_internal,\@abi-omnipotent
1102.align 32
1103bn_sqr8x_internal:
1104__bn_sqr8x_internal:
1105 ##############################################################
1106 # Squaring part:
1107 #
1108 # a) multiply-n-add everything but a[i]*a[i];
1109 # b) shift result of a) by 1 to the left and accumulate
1110 # a[i]*a[i] products;
1111 #
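	# For example, with just two words ("a" = a[1]*2^64 + a[0]):
	#   a^2 = a[1]*a[1]*2^128 + 2*a[1]*a[0]*2^64 + a[0]*a[0]
	# Step a) accumulates the off-diagonal product a[1]*a[0] once, and
	# step b) doubles the whole intermediate result by shifting it left
	# one bit and then adds the diagonal squares a[i]*a[i].
	#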
1112 ##############################################################
1113 # a[1]a[0]
1114 # a[2]a[0]
1115 # a[3]a[0]
1116 # a[2]a[1]
1117 # a[4]a[0]
1118 # a[3]a[1]
1119 # a[5]a[0]
1120 # a[4]a[1]
1121 # a[3]a[2]
1122 # a[6]a[0]
1123 # a[5]a[1]
1124 # a[4]a[2]
1125 # a[7]a[0]
1126 # a[6]a[1]
1127 # a[5]a[2]
1128 # a[4]a[3]
1129 # a[7]a[1]
1130 # a[6]a[2]
1131 # a[5]a[3]
1132 # a[7]a[2]
1133 # a[6]a[3]
1134 # a[5]a[4]
1135 # a[7]a[3]
1136 # a[6]a[4]
1137 # a[7]a[4]
1138 # a[6]a[5]
1139 # a[7]a[5]
1140 # a[7]a[6]
1141 # a[1]a[0]
1142 # a[2]a[0]
1143 # a[3]a[0]
1144 # a[4]a[0]
1145 # a[5]a[0]
1146 # a[6]a[0]
1147 # a[7]a[0]
1148 # a[2]a[1]
1149 # a[3]a[1]
1150 # a[4]a[1]
1151 # a[5]a[1]
1152 # a[6]a[1]
1153 # a[7]a[1]
1154 # a[3]a[2]
1155 # a[4]a[2]
1156 # a[5]a[2]
1157 # a[6]a[2]
1158 # a[7]a[2]
1159 # a[4]a[3]
1160 # a[5]a[3]
1161 # a[6]a[3]
1162 # a[7]a[3]
1163 # a[5]a[4]
1164 # a[6]a[4]
1165 # a[7]a[4]
1166 # a[6]a[5]
1167 # a[7]a[5]
1168 # a[7]a[6]
1169 # a[0]a[0]
1170 # a[1]a[1]
1171 # a[2]a[2]
1172 # a[3]a[3]
1173 # a[4]a[4]
1174 # a[5]a[5]
1175 # a[6]a[6]
1176 # a[7]a[7]
1177
1178 lea 32(%r10),$i # $i=-($num-32)
1179 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1180
1181 mov $num,$j # $j=$num
1182
1183 # comments apply to $num==8 case
1184 mov -32($aptr,$i),$a0 # a[0]
1185 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1186 mov -24($aptr,$i),%rax # a[1]
1187 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1188 mov -16($aptr,$i),$ai # a[2]
1189 mov %rax,$a1
1190
1191 mul $a0 # a[1]*a[0]
1192 mov %rax,$A0[0] # a[1]*a[0]
1193 mov $ai,%rax # a[2]
1194 mov %rdx,$A0[1]
1195 mov $A0[0],-24($tptr,$i) # t[1]
1196
1197 mul $a0 # a[2]*a[0]
1198 add %rax,$A0[1]
1199 mov $ai,%rax
1200 adc \$0,%rdx
1201 mov $A0[1],-16($tptr,$i) # t[2]
1202 mov %rdx,$A0[0]
1203
1204
1205 mov -8($aptr,$i),$ai # a[3]
1206 mul $a1 # a[2]*a[1]
1207 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1208 mov $ai,%rax
1209 mov %rdx,$A1[1]
1210
1211 lea ($i),$j
1212 mul $a0 # a[3]*a[0]
1213 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1214 mov $ai,%rax
1215 mov %rdx,$A0[1]
1216 adc \$0,$A0[1]
1217 add $A1[0],$A0[0]
1218 adc \$0,$A0[1]
1219 mov $A0[0],-8($tptr,$j) # t[3]
1220 jmp .Lsqr4x_1st
1221
1222.align 32
1223.Lsqr4x_1st:
1224 mov ($aptr,$j),$ai # a[4]
1225 mul $a1 # a[3]*a[1]
1226 add %rax,$A1[1] # a[3]*a[1]+t[4]
1227 mov $ai,%rax
1228 mov %rdx,$A1[0]
1229 adc \$0,$A1[0]
1230
1231 mul $a0 # a[4]*a[0]
1232 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1233 mov $ai,%rax # a[3]
1234 mov 8($aptr,$j),$ai # a[5]
1235 mov %rdx,$A0[0]
1236 adc \$0,$A0[0]
1237 add $A1[1],$A0[1]
1238 adc \$0,$A0[0]
1239
1240
1241 mul $a1 # a[4]*a[3]
1242 add %rax,$A1[0] # a[4]*a[3]+t[5]
1243 mov $ai,%rax
1244 mov $A0[1],($tptr,$j) # t[4]
1245 mov %rdx,$A1[1]
1246 adc \$0,$A1[1]
1247
1248 mul $a0 # a[5]*a[2]
1249 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1250 mov $ai,%rax
1251 mov 16($aptr,$j),$ai # a[6]
1252 mov %rdx,$A0[1]
1253 adc \$0,$A0[1]
1254 add $A1[0],$A0[0]
1255 adc \$0,$A0[1]
1256
1257 mul $a1 # a[5]*a[3]
1258 add %rax,$A1[1] # a[5]*a[3]+t[6]
1259 mov $ai,%rax
1260 mov $A0[0],8($tptr,$j) # t[5]
1261 mov %rdx,$A1[0]
1262 adc \$0,$A1[0]
1263
1264 mul $a0 # a[6]*a[2]
1265 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1266 mov $ai,%rax # a[3]
1267 mov 24($aptr,$j),$ai # a[7]
1268 mov %rdx,$A0[0]
1269 adc \$0,$A0[0]
1270 add $A1[1],$A0[1]
1271 adc \$0,$A0[0]
1272
1273
1274 mul $a1 # a[6]*a[5]
1275 add %rax,$A1[0] # a[6]*a[5]+t[7]
1276 mov $ai,%rax
1277 mov $A0[1],16($tptr,$j) # t[6]
1278 mov %rdx,$A1[1]
1279 adc \$0,$A1[1]
1280 lea 32($j),$j
1281
1282 mul $a0 # a[7]*a[4]
1283 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
1284 mov $ai,%rax
1285 mov %rdx,$A0[1]
1286 adc \$0,$A0[1]
1287 add $A1[0],$A0[0]
1288 adc \$0,$A0[1]
1289 mov $A0[0],-8($tptr,$j) # t[7]
1290
1291 cmp \$0,$j
1292 jne .Lsqr4x_1st
1293
1294 mul $a1 # a[7]*a[5]
1295 add %rax,$A1[1]
1296 lea 16($i),$i
1297 adc \$0,%rdx
1298 add $A0[1],$A1[1]
1299 adc \$0,%rdx
1300
1301 mov $A1[1],($tptr) # t[8]
1302 mov %rdx,$A1[0]
1303 mov %rdx,8($tptr) # t[9]
1304 jmp .Lsqr4x_outer
1305
1306.align 32
1307.Lsqr4x_outer: # comments apply to $num==6 case
1308 mov -32($aptr,$i),$a0 # a[0]
1309 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1310 mov -24($aptr,$i),%rax # a[1]
1311 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1312 mov -16($aptr,$i),$ai # a[2]
1313 mov %rax,$a1
1314
1315 mul $a0 # a[1]*a[0]
1316 mov -24($tptr,$i),$A0[0] # t[1]
1317 add %rax,$A0[0] # a[1]*a[0]+t[1]
1318 mov $ai,%rax # a[2]
1319 adc \$0,%rdx
1320 mov $A0[0],-24($tptr,$i) # t[1]
1321 mov %rdx,$A0[1]
1322
1323 mul $a0 # a[2]*a[0]
1324 add %rax,$A0[1]
1325 mov $ai,%rax
1326 adc \$0,%rdx
1327 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1328 mov %rdx,$A0[0]
1329 adc \$0,$A0[0]
1330 mov $A0[1],-16($tptr,$i) # t[2]
1331
1332 xor $A1[0],$A1[0]
1333
1334 mov -8($aptr,$i),$ai # a[3]
1335 mul $a1 # a[2]*a[1]
1336 add %rax,$A1[0] # a[2]*a[1]+t[3]
1337 mov $ai,%rax
1338 adc \$0,%rdx
1339 add -8($tptr,$i),$A1[0]
1340 mov %rdx,$A1[1]
1341 adc \$0,$A1[1]
1342
1343 mul $a0 # a[3]*a[0]
1344 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1345 mov $ai,%rax
1346 adc \$0,%rdx
1347 add $A1[0],$A0[0]
1348 mov %rdx,$A0[1]
1349 adc \$0,$A0[1]
1350 mov $A0[0],-8($tptr,$i) # t[3]
1351
1352 lea ($i),$j
1353 jmp .Lsqr4x_inner
1354
1355.align 32
1356.Lsqr4x_inner:
1357 mov ($aptr,$j),$ai # a[4]
1358 mul $a1 # a[3]*a[1]
1359 add %rax,$A1[1] # a[3]*a[1]+t[4]
1360 mov $ai,%rax
1361 mov %rdx,$A1[0]
1362 adc \$0,$A1[0]
1363 add ($tptr,$j),$A1[1]
1364 adc \$0,$A1[0]
1365
1366 .byte 0x67
1367 mul $a0 # a[4]*a[0]
1368 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1369 mov $ai,%rax # a[3]
1370 mov 8($aptr,$j),$ai # a[5]
1371 mov %rdx,$A0[0]
1372 adc \$0,$A0[0]
1373 add $A1[1],$A0[1]
1374 adc \$0,$A0[0]
1375
1376 mul $a1 # a[4]*a[3]
1377 add %rax,$A1[0] # a[4]*a[3]+t[5]
1378 mov $A0[1],($tptr,$j) # t[4]
1379 mov $ai,%rax
1380 mov %rdx,$A1[1]
1381 adc \$0,$A1[1]
1382 add 8($tptr,$j),$A1[0]
1383 lea 16($j),$j # j++
1384 adc \$0,$A1[1]
1385
1386 mul $a0 # a[5]*a[2]
1387 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1388 mov $ai,%rax
1389 adc \$0,%rdx
1390 add $A1[0],$A0[0]
1391 mov %rdx,$A0[1]
1392 adc \$0,$A0[1]
1393 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1394
1395 cmp \$0,$j
1396 jne .Lsqr4x_inner
1397
1398 .byte 0x67
1399 mul $a1 # a[5]*a[3]
1400 add %rax,$A1[1]
1401 adc \$0,%rdx
1402 add $A0[1],$A1[1]
1403 adc \$0,%rdx
1404
1405 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1406 mov %rdx,$A1[0]
1407 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1408
1409 add \$16,$i
1410 jnz .Lsqr4x_outer
1411
1412 # comments apply to $num==4 case
1413 mov -32($aptr),$a0 # a[0]
1414 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1415 mov -24($aptr),%rax # a[1]
1416 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1417 mov -16($aptr),$ai # a[2]
1418 mov %rax,$a1
1419
1420 mul $a0 # a[1]*a[0]
1421 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1422 mov $ai,%rax # a[2]
1423 mov %rdx,$A0[1]
1424 adc \$0,$A0[1]
1425
1426 mul $a0 # a[2]*a[0]
1427 add %rax,$A0[1]
1428 mov $ai,%rax
1429 mov $A0[0],-24($tptr) # t[1]
1430 mov %rdx,$A0[0]
1431 adc \$0,$A0[0]
1432 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1433 mov -8($aptr),$ai # a[3]
1434 adc \$0,$A0[0]
1435
1436 mul $a1 # a[2]*a[1]
1437 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1438 mov $ai,%rax
1439 mov $A0[1],-16($tptr) # t[2]
1440 mov %rdx,$A1[1]
1441 adc \$0,$A1[1]
1442
1443 mul $a0 # a[3]*a[0]
1444 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1445 mov $ai,%rax
1446 mov %rdx,$A0[1]
1447 adc \$0,$A0[1]
1448 add $A1[0],$A0[0]
1449 adc \$0,$A0[1]
1450 mov $A0[0],-8($tptr) # t[3]
1451
1452 mul $a1 # a[3]*a[1]
1453 add %rax,$A1[1]
1454 mov -16($aptr),%rax # a[2]
1455 adc \$0,%rdx
1456 add $A0[1],$A1[1]
1457 adc \$0,%rdx
1458
1459 mov $A1[1],($tptr) # t[4]
1460 mov %rdx,$A1[0]
1461 mov %rdx,8($tptr) # t[5]
1462
1463 mul $ai # a[2]*a[3]
1464___
1465{
1466my ($shift,$carry)=($a0,$a1);
1467my @S=(@A1,$ai,$n0);
1468$code.=<<___;
1469 add \$16,$i
1470 xor $shift,$shift
1471 sub $num,$i # $i=16-$num
1472 xor $carry,$carry
1473
1474 add $A1[0],%rax # t[5]
1475 adc \$0,%rdx
1476 mov %rax,8($tptr) # t[5]
1477 mov %rdx,16($tptr) # t[6]
1478 mov $carry,24($tptr) # t[7]
1479
1480 mov -16($aptr,$i),%rax # a[0]
1481 lea 48+8(%rsp),$tptr
1482 xor $A0[0],$A0[0] # t[0]
1483 mov 8($tptr),$A0[1] # t[1]
1484
1485 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1486 shr \$63,$A0[0]
1487 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1488 shr \$63,$A0[1]
1489 or $A0[0],$S[1] # | t[2*i]>>63
1490 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1491 mov $A0[1],$shift # shift=t[2*i+1]>>63
1492 mul %rax # a[i]*a[i]
1493 neg $carry # mov $carry,cf
1494 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1495 adc %rax,$S[0]
1496 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1497 mov $S[0],($tptr)
1498 adc %rdx,$S[1]
1499
1500 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1501 mov $S[1],8($tptr)
1502 sbb $carry,$carry # mov cf,$carry
1503 shr \$63,$A0[0]
1504 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1505 shr \$63,$A0[1]
1506 or $A0[0],$S[3] # | t[2*i]>>63
1507 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1508 mov $A0[1],$shift # shift=t[2*i+1]>>63
1509 mul %rax # a[i]*a[i]
1510 neg $carry # mov $carry,cf
1511 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1512 adc %rax,$S[2]
1513 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1514 mov $S[2],16($tptr)
1515 adc %rdx,$S[3]
1516 lea 16($i),$i
1517 mov $S[3],24($tptr)
1518 sbb $carry,$carry # mov cf,$carry
1519 lea 64($tptr),$tptr
1520 jmp .Lsqr4x_shift_n_add
1521
1522.align 32
1523.Lsqr4x_shift_n_add:
1524 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1525 shr \$63,$A0[0]
1526 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1527 shr \$63,$A0[1]
1528 or $A0[0],$S[1] # | t[2*i]>>63
1529 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1530 mov $A0[1],$shift # shift=t[2*i+1]>>63
1531 mul %rax # a[i]*a[i]
1532 neg $carry # mov $carry,cf
1533 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1534 adc %rax,$S[0]
1535 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1536 mov $S[0],-32($tptr)
1537 adc %rdx,$S[1]
1538
1539 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1540 mov $S[1],-24($tptr)
1541 sbb $carry,$carry # mov cf,$carry
1542 shr \$63,$A0[0]
1543 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1544 shr \$63,$A0[1]
1545 or $A0[0],$S[3] # | t[2*i]>>63
1546 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1547 mov $A0[1],$shift # shift=t[2*i+1]>>63
1548 mul %rax # a[i]*a[i]
1549 neg $carry # mov $carry,cf
1550 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1551 adc %rax,$S[2]
1552 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1553 mov $S[2],-16($tptr)
1554 adc %rdx,$S[3]
1555
1556 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1557 mov $S[3],-8($tptr)
1558 sbb $carry,$carry # mov cf,$carry
1559 shr \$63,$A0[0]
1560 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1561 shr \$63,$A0[1]
1562 or $A0[0],$S[1] # | t[2*i]>>63
1563 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1564 mov $A0[1],$shift # shift=t[2*i+1]>>63
1565 mul %rax # a[i]*a[i]
1566 neg $carry # mov $carry,cf
1567 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1568 adc %rax,$S[0]
1569 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1570 mov $S[0],0($tptr)
1571 adc %rdx,$S[1]
1572
1573 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1574 mov $S[1],8($tptr)
1575 sbb $carry,$carry # mov cf,$carry
1576 shr \$63,$A0[0]
1577 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1578 shr \$63,$A0[1]
1579 or $A0[0],$S[3] # | t[2*i]>>63
1580 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1581 mov $A0[1],$shift # shift=t[2*i+1]>>63
1582 mul %rax # a[i]*a[i]
1583 neg $carry # mov $carry,cf
1584 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1585 adc %rax,$S[2]
1586 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1587 mov $S[2],16($tptr)
1588 adc %rdx,$S[3]
1589 mov $S[3],24($tptr)
1590 sbb $carry,$carry # mov cf,$carry
1591 lea 64($tptr),$tptr
1592 add \$32,$i
1593 jnz .Lsqr4x_shift_n_add
1594
1595 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1596 .byte 0x67
1597 shr \$63,$A0[0]
1598 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1599 shr \$63,$A0[1]
1600 or $A0[0],$S[1] # | t[2*i]>>63
1601 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1602 mov $A0[1],$shift # shift=t[2*i+1]>>63
1603 mul %rax # a[i]*a[i]
1604 neg $carry # mov $carry,cf
1605 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1606 adc %rax,$S[0]
1607 mov -8($aptr),%rax # a[i+1] # prefetch
1608 mov $S[0],-32($tptr)
1609 adc %rdx,$S[1]
1610
1611 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1612 mov $S[1],-24($tptr)
1613 sbb $carry,$carry # mov cf,$carry
1614 shr \$63,$A0[0]
1615 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1616 shr \$63,$A0[1]
1617 or $A0[0],$S[3] # | t[2*i]>>63
1618 mul %rax # a[i]*a[i]
1619 neg $carry # mov $carry,cf
1620 adc %rax,$S[2]
1621 adc %rdx,$S[3]
1622 mov $S[2],-16($tptr)
1623 mov $S[3],-8($tptr)
1624___
1625}
1626######################################################################
1627# Montgomery reduction part, "word-by-word" algorithm.
1628#
1629# This new path is inspired by multiple submissions from Intel, by
1630# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1631# Vinodh Gopal...
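#
# For reference, one full word-by-word Montgomery reduction can be sketched in
# Perl as follows (illustrative only and never called; it assumes toy word
# sizes so products fit in native integers, while the code below processes
# eight words per pass and keeps them in registers):
sub _mont_reduce_sketch {
	my ($t, $n, $n0, $num, $base) = @_;	# $t: 2*$num+1 words, little-endian, top word 0
	for my $i (0 .. $num - 1) {
		my $m = ($t->[$i] * $n0) % $base;	# $n0 == -n^{-1} mod $base
		my $carry = 0;
		for my $j (0 .. $num - 1) {		# t += m*n, shifted left by $i words
			my $v = $t->[$i + $j] + $m * $n->[$j] + $carry;
			$t->[$i + $j] = $v % $base;
			$carry = int($v / $base);
		}
		for (my $j = $i + $num; $carry; $j++) {	# ripple the carry upwards
			my $v = $t->[$j] + $carry;
			$t->[$j] = $v % $base;
			$carry = int($v / $base);
		}
	}
	return [ @{$t}[$num .. 2*$num] ];	# may still need one final subtraction of n
}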
1632{
1633my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1634
1635$code.=<<___;
1636 movq %xmm2,$nptr
1637__bn_sqr8x_reduction:
1638	xor	%rax,%rax
1639	lea	($nptr,$num),%rcx	# end of n[]
1640	lea	48+8(%rsp,$num,2),%rdx	# end of t[] buffer
1641 mov %rcx,0+8(%rsp)
1642 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1643 mov %rdx,8+8(%rsp)
1644 neg $num
1645 jmp .L8x_reduction_loop
1646
1647.align 32
1648.L8x_reduction_loop:
1649 lea ($tptr,$num),$tptr # start of current t[] window
1650 .byte 0x66
1651 mov 8*0($tptr),$m0
1652 mov 8*1($tptr),%r9
1653 mov 8*2($tptr),%r10
1654 mov 8*3($tptr),%r11
1655 mov 8*4($tptr),%r12
1656 mov 8*5($tptr),%r13
1657 mov 8*6($tptr),%r14
1658 mov 8*7($tptr),%r15
1659 mov %rax,(%rdx) # store top-most carry bit
1660 lea 8*8($tptr),$tptr
1661
1662 .byte 0x67
1663 mov $m0,%r8
1664 imulq 32+8(%rsp),$m0 # n0*a[0]
1665	mov	8*0($nptr),%rax		# n[0]
1666	mov	\$8,%ecx
1667 jmp .L8x_reduce
1668
1669.align 32
1670.L8x_reduce:
1671 mulq $m0
1672	mov	8*1($nptr),%rax		# n[1]
1673	neg	%r8
1674 mov %rdx,%r8
1675 adc \$0,%r8
1676
1677 mulq $m0
1678 add %rax,%r9
1679	mov	8*2($nptr),%rax
1680	adc	\$0,%rdx
1681 add %r9,%r8
1682 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1683 mov %rdx,%r9
1684 adc \$0,%r9
1685
1686 mulq $m0
1687 add %rax,%r10
1688	mov	8*3($nptr),%rax
1689	adc	\$0,%rdx
1690 add %r10,%r9
1691 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1692 mov %rdx,%r10
1693 adc \$0,%r10
1694
1695 mulq $m0
1696 add %rax,%r11
1697	mov	8*4($nptr),%rax
1698	adc	\$0,%rdx
1699 imulq %r8,$carry # modulo-scheduled
1700 add %r11,%r10
1701 mov %rdx,%r11
1702 adc \$0,%r11
1703
1704 mulq $m0
1705 add %rax,%r12
1706	mov	8*5($nptr),%rax
1707	adc	\$0,%rdx
1708 add %r12,%r11
1709 mov %rdx,%r12
1710 adc \$0,%r12
1711
1712 mulq $m0
1713 add %rax,%r13
1714	mov	8*6($nptr),%rax
1715	adc	\$0,%rdx
1716 add %r13,%r12
1717 mov %rdx,%r13
1718 adc \$0,%r13
1719
1720 mulq $m0
1721 add %rax,%r14
1722	mov	8*7($nptr),%rax
1723	adc	\$0,%rdx
1724 add %r14,%r13
1725 mov %rdx,%r14
1726 adc \$0,%r14
1727
1728 mulq $m0
1729 mov $carry,$m0 # n0*a[i]
1730 add %rax,%r15
1731	mov	8*0($nptr),%rax		# n[0]
1732	adc	\$0,%rdx
1733 add %r15,%r14
1734 mov %rdx,%r15
1735 adc \$0,%r15
1736
1737 dec %ecx
1738 jnz .L8x_reduce
1739
1740	lea	8*8($nptr),$nptr
1741	xor	%rax,%rax
1742 mov 8+8(%rsp),%rdx # pull end of t[]
1743 cmp 0+8(%rsp),$nptr # end of n[]?
1744 jae .L8x_no_tail
1745
1746 .byte 0x66
1747 add 8*0($tptr),%r8
1748 adc 8*1($tptr),%r9
1749 adc 8*2($tptr),%r10
1750 adc 8*3($tptr),%r11
1751 adc 8*4($tptr),%r12
1752 adc 8*5($tptr),%r13
1753 adc 8*6($tptr),%r14
1754 adc 8*7($tptr),%r15
1755 sbb $carry,$carry # top carry
1756
1757 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1758 mov \$8,%ecx
1759	mov	8*0($nptr),%rax
1760	jmp	.L8x_tail
1761
1762.align 32
1763.L8x_tail:
1764 mulq $m0
1765 add %rax,%r8
1766	mov	8*1($nptr),%rax
1767	mov	%r8,($tptr)		# save result
1768 mov %rdx,%r8
1769 adc \$0,%r8
1770
1771 mulq $m0
1772 add %rax,%r9
1773	mov	8*2($nptr),%rax
1774	adc	\$0,%rdx
1775 add %r9,%r8
1776 lea 8($tptr),$tptr # $tptr++
1777 mov %rdx,%r9
1778 adc \$0,%r9
1779
1780 mulq $m0
1781 add %rax,%r10
1782	mov	8*3($nptr),%rax
1783	adc	\$0,%rdx
1784 add %r10,%r9
1785 mov %rdx,%r10
1786 adc \$0,%r10
1787
1788 mulq $m0
1789 add %rax,%r11
1790	mov	8*4($nptr),%rax
1791	adc	\$0,%rdx
1792 add %r11,%r10
1793 mov %rdx,%r11
1794 adc \$0,%r11
1795
1796 mulq $m0
1797 add %rax,%r12
1798	mov	8*5($nptr),%rax
1799	adc	\$0,%rdx
1800 add %r12,%r11
1801 mov %rdx,%r12
1802 adc \$0,%r12
1803
1804 mulq $m0
1805 add %rax,%r13
1806	mov	8*6($nptr),%rax
1807	adc	\$0,%rdx
1808 add %r13,%r12
1809 mov %rdx,%r13
1810 adc \$0,%r13
1811
1812 mulq $m0
1813 add %rax,%r14
1814	mov	8*7($nptr),%rax
1815	adc	\$0,%rdx
1816 add %r14,%r13
1817 mov %rdx,%r14
1818 adc \$0,%r14
1819
1820 mulq $m0
1821 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1822 add %rax,%r15
1823 adc \$0,%rdx
1824 add %r15,%r14
1825	mov	8*0($nptr),%rax		# pull n[0]
1826	mov	%rdx,%r15
1827 adc \$0,%r15
1828
1829 dec %ecx
1830 jnz .L8x_tail
1831
1832	lea	8*8($nptr),$nptr
1833	mov	8+8(%rsp),%rdx		# pull end of t[]
1834 cmp 0+8(%rsp),$nptr # end of n[]?
1835 jae .L8x_tail_done # break out of loop
1836
1837 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1838 neg $carry
1839 mov 8*0($nptr),%rax # pull n[0]
1840 adc 8*0($tptr),%r8
1841 adc 8*1($tptr),%r9
1842 adc 8*2($tptr),%r10
1843 adc 8*3($tptr),%r11
1844 adc 8*4($tptr),%r12
1845 adc 8*5($tptr),%r13
1846 adc 8*6($tptr),%r14
1847 adc 8*7($tptr),%r15
1848 sbb $carry,$carry # top carry
1849
1850 mov \$8,%ecx
1851 jmp .L8x_tail
1852
1853.align 32
1854.L8x_tail_done:
1855 add (%rdx),%r8 # can this overflow?
1856	adc	\$0,%r9
1857 adc \$0,%r10
1858 adc \$0,%r11
1859 adc \$0,%r12
1860 adc \$0,%r13
1861 adc \$0,%r14
1862 adc \$0,%r15 # can't overflow, because we
1863 # started with "overhung" part
1864 # of multiplication
1865	xor	%rax,%rax
1866
1867 neg $carry
1868.L8x_no_tail:
1869 adc 8*0($tptr),%r8
1870 adc 8*1($tptr),%r9
1871 adc 8*2($tptr),%r10
1872 adc 8*3($tptr),%r11
1873 adc 8*4($tptr),%r12
1874 adc 8*5($tptr),%r13
1875 adc 8*6($tptr),%r14
1876 adc 8*7($tptr),%r15
1877 adc \$0,%rax # top-most carry
1878	mov	-8($nptr),%rcx		# np[num-1]
1879	xor	$carry,$carry
1880
1881 movq %xmm2,$nptr # restore $nptr
1882
1883 mov %r8,8*0($tptr) # store top 512 bits
1884 mov %r9,8*1($tptr)
1885 movq %xmm3,$num # $num is %r9, can't be moved upwards
1886 mov %r10,8*2($tptr)
1887 mov %r11,8*3($tptr)
1888 mov %r12,8*4($tptr)
1889 mov %r13,8*5($tptr)
1890 mov %r14,8*6($tptr)
1891 mov %r15,8*7($tptr)
1892 lea 8*8($tptr),$tptr
1893
1894 cmp %rdx,$tptr # end of t[]?
1895 jb .L8x_reduction_loop
1896	ret
1897.size bn_sqr8x_internal,.-bn_sqr8x_internal
1898___
1899}
1900##############################################################
1901# Post-condition, 4x unrolled
1902#
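# The post-condition is a branch-free conditional subtraction: n (or zero,
# selected by a borrow-derived mask) is subtracted from the result so that the
# final value is fully reduced. A Perl sketch of the idea (illustrative only
# and never called by this script):
sub _cond_subtract_sketch {
	my ($t, $n, $num, $base, $mask) = @_;	# $mask: all-ones if n must be subtracted, else 0
	my ($borrow, @r) = (0);
	for my $j (0 .. $num - 1) {
		my $v = $t->[$j] - ($n->[$j] & $mask) - $borrow;
		$borrow = $v < 0 ? 1 : 0;
		$r[$j] = $v % $base;		# Perl's % returns a non-negative result here
	}
	return \@r;
}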
1903{
1904my ($tptr,$nptr)=("%rbx","%rbp");
1905$code.=<<___;
1906.type	__bn_post4x_internal,\@abi-omnipotent
1907.align	32
1908__bn_post4x_internal:
1909 mov 8*0($nptr),%r12
1910 lea (%rdi,$num),$tptr # %rdi was $tptr above
1911 mov $num,%rcx
1912 movq %xmm1,$rptr # restore $rptr
1913 neg %rax
1914 movq %xmm1,$aptr # prepare for back-to-back call
1915 sar \$3+2,%rcx
1916 dec %r12 # so that after 'not' we get -n[0]
1917 xor %r10,%r10
1918 mov 8*1($nptr),%r13
1919 mov 8*2($nptr),%r14
1920 mov 8*3($nptr),%r15
1921 jmp .Lsqr4x_sub_entry
1922
1923.align 16
1924.Lsqr4x_sub:
1925	mov	8*0($nptr),%r12
1926 mov 8*1($nptr),%r13
1927 mov 8*2($nptr),%r14
1928 mov 8*3($nptr),%r15
1929.Lsqr4x_sub_entry:
1930 lea 8*4($nptr),$nptr
1931 not %r12
1932 not %r13
1933 not %r14
1934 not %r15
1935 and %rax,%r12
1936 and %rax,%r13
1937 and %rax,%r14
1938 and %rax,%r15
1939
1940 neg %r10 # mov %r10,%cf
1941 adc 8*0($tptr),%r12
1942 adc 8*1($tptr),%r13
1943 adc 8*2($tptr),%r14
1944 adc 8*3($tptr),%r15
1945	mov	%r12,8*0($rptr)
1946	lea	8*4($tptr),$tptr
1947	mov	%r13,8*1($rptr)
1948	sbb	%r10,%r10		# mov %cf,%r10
1949	mov	%r14,8*2($rptr)
1950 mov %r15,8*3($rptr)
1951 lea 8*4($rptr),$rptr
1952
1953 inc %rcx # pass %cf
1954 jnz .Lsqr4x_sub
1955
1956	mov	$num,%r10	# prepare for back-to-back call
1957 neg $num # restore $num
1958 ret
David Benjamin4969cc92016-04-22 15:02:23 -04001959.size __bn_post4x_internal,.-__bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08001960___
David Benjamin4969cc92016-04-22 15:02:23 -04001961}
Adam Langleyd9e397b2015-01-22 14:27:53 -08001962{
1963$code.=<<___;
1964.globl bn_from_montgomery
1965.type bn_from_montgomery,\@abi-omnipotent
1966.align 32
1967bn_from_montgomery:
1968 testl \$7,`($win64?"48(%rsp)":"%r9d")`
1969 jz bn_from_mont8x
1970 xor %eax,%eax
1971 ret
1972.size bn_from_montgomery,.-bn_from_montgomery
1973
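# (Illustrative note, not part of the original: bn_from_mont8x converts a
# value out of Montgomery form, i.e. computes a*R^-1 mod n with R = 2^(64*num).
# It copies a[] into the low half of a 2*num-word buffer, zeroes the upper
# half, and runs the same 8x reduction used by the squaring path, followed
# by the usual post-condition.)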
1974.type bn_from_mont8x,\@function,6
1975.align 32
1976bn_from_mont8x:
1977 .byte 0x67
1978 mov %rsp,%rax
1979 push %rbx
1980 push %rbp
1981 push %r12
1982 push %r13
1983 push %r14
1984 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04001985
Adam Langleyd9e397b2015-01-22 14:27:53 -08001986 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04001987 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08001988 neg $num
1989 mov ($n0),$n0 # *n0
1990
1991 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04001992 # Ensure that stack frame doesn't alias with $rptr+3*$num
1993 # modulo 4096, which covers ret[num], am[num] and n[num]
1994	# (see bn_exp.c). The stack is allocated so that it aligns with
1995	# bn_power5's frame, and as bn_from_montgomery happens to be the
1996	# last operation, we use the opportunity to cleanse it.
Adam Langleyd9e397b2015-01-22 14:27:53 -08001997 #
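	# (Illustrative note, not from the original: the concern is 4K
	# aliasing.  If a store into the t[] frame and a later load from
	# ret[]/am[]/n[] fall on the same address modulo 4096, the CPU's
	# memory-disambiguation logic may guess a dependency and stall, so
	# the frame base is adjusted until their distance avoids that range.)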
David Benjamin4969cc92016-04-22 15:02:23 -04001998 lea -320(%rsp,$num,2),%r11
1999 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002000 and \$4095,%r11
2001 cmp %r11,%r10
2002 jb .Lfrom_sp_alt
2003 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04002004 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002005 jmp .Lfrom_sp_done
2006
2007.align 32
2008.Lfrom_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002009 lea 4096-320(,$num,2),%r10
2010 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002011 sub %r10,%r11
2012 mov \$0,%r10
2013 cmovc %r10,%r11
2014 sub %r11,%rsp
2015.Lfrom_sp_done:
2016 and \$-64,%rsp
2017 mov $num,%r10
2018 neg $num
2019
2020 ##############################################################
2021 # Stack layout
2022 #
2023 # +0 saved $num, used in reduction section
2024 # +8 &t[2*$num], used in reduction section
2025 # +32 saved *n0
2026 # +40 saved %rsp
2027 # +48 t[2*$num]
2028 #
2029 mov $n0, 32(%rsp)
2030 mov %rax, 40(%rsp) # save original %rsp
2031.Lfrom_body:
2032 mov $num,%r11
2033 lea 48(%rsp),%rax
2034 pxor %xmm0,%xmm0
2035 jmp .Lmul_by_1
2036
2037.align 32
2038.Lmul_by_1:
2039 movdqu ($aptr),%xmm1
2040 movdqu 16($aptr),%xmm2
2041 movdqu 32($aptr),%xmm3
2042 movdqa %xmm0,(%rax,$num)
2043 movdqu 48($aptr),%xmm4
2044 movdqa %xmm0,16(%rax,$num)
2045 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
2046 movdqa %xmm1,(%rax)
2047 movdqa %xmm0,32(%rax,$num)
2048 movdqa %xmm2,16(%rax)
2049 movdqa %xmm0,48(%rax,$num)
2050 movdqa %xmm3,32(%rax)
2051 movdqa %xmm4,48(%rax)
2052 lea 64(%rax),%rax
2053 sub \$64,%r11
2054 jnz .Lmul_by_1
2055
2056 movq $rptr,%xmm1
2057 movq $nptr,%xmm2
2058 .byte 0x67
2059 mov $nptr,%rbp
2060 movq %r10, %xmm3 # -num
2061___
2062$code.=<<___ if ($addx);
2063 mov OPENSSL_ia32cap_P+8(%rip),%r11d
David Benjamin4969cc92016-04-22 15:02:23 -04002064 and \$0x80108,%r11d
2065 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
Adam Langleyd9e397b2015-01-22 14:27:53 -08002066 jne .Lfrom_mont_nox
2067
2068 lea (%rax,$num),$rptr
David Benjamin4969cc92016-04-22 15:02:23 -04002069 call __bn_sqrx8x_reduction
2070 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002071
2072 pxor %xmm0,%xmm0
2073 lea 48(%rsp),%rax
2074 mov 40(%rsp),%rsi # restore %rsp
2075 jmp .Lfrom_mont_zero
2076
2077.align 32
2078.Lfrom_mont_nox:
2079___
2080$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002081 call __bn_sqr8x_reduction
2082 call __bn_post4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002083
2084 pxor %xmm0,%xmm0
2085 lea 48(%rsp),%rax
2086 mov 40(%rsp),%rsi # restore %rsp
2087 jmp .Lfrom_mont_zero
2088
2089.align 32
2090.Lfrom_mont_zero:
2091 movdqa %xmm0,16*0(%rax)
2092 movdqa %xmm0,16*1(%rax)
2093 movdqa %xmm0,16*2(%rax)
2094 movdqa %xmm0,16*3(%rax)
2095 lea 16*4(%rax),%rax
2096 sub \$32,$num
2097 jnz .Lfrom_mont_zero
2098
2099 mov \$1,%rax
2100 mov -48(%rsi),%r15
2101 mov -40(%rsi),%r14
2102 mov -32(%rsi),%r13
2103 mov -24(%rsi),%r12
2104 mov -16(%rsi),%rbp
2105 mov -8(%rsi),%rbx
2106 lea (%rsi),%rsp
2107.Lfrom_epilogue:
2108 ret
2109.size bn_from_mont8x,.-bn_from_mont8x
2110___
2111}
2112}}}
2113
2114if ($addx) {{{
2115my $bp="%rdx"; # restore original value
2116
2117$code.=<<___;
2118.type bn_mulx4x_mont_gather5,\@function,6
2119.align 32
2120bn_mulx4x_mont_gather5:
2121.Lmulx4x_enter:
Adam Langleyd9e397b2015-01-22 14:27:53 -08002122 mov %rsp,%rax
2123 push %rbx
2124 push %rbp
2125 push %r12
2126 push %r13
2127 push %r14
2128 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04002129
Adam Langleyd9e397b2015-01-22 14:27:53 -08002130 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002131 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002132 neg $num # -$num
2133 mov ($n0),$n0 # *n0
2134
2135 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002136 # Ensure that stack frame doesn't alias with $rptr+3*$num
2137 # modulo 4096, which covers ret[num], am[num] and n[num]
2138 # (see bn_exp.c). This is done to allow memory disambiguation
2139	# logic to do its magic. [An extra [num] is allocated in order
2140	# to align with bn_power5's frame, which is cleansed after
2141	# completing exponentiation. An extra 256 bytes is for the power mask
2142	# calculated from the 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002143 #
David Benjamin4969cc92016-04-22 15:02:23 -04002144 lea -320(%rsp,$num,2),%r11
2145 sub $rp,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002146 and \$4095,%r11
2147 cmp %r11,%r10
2148 jb .Lmulx4xsp_alt
2149 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04002150 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002151 jmp .Lmulx4xsp_done
2152
Adam Langleyd9e397b2015-01-22 14:27:53 -08002153.Lmulx4xsp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002154 lea 4096-320(,$num,2),%r10
2155 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002156 sub %r10,%r11
2157 mov \$0,%r10
2158 cmovc %r10,%r11
2159 sub %r11,%rsp
2160.Lmulx4xsp_done:
2161 and \$-64,%rsp # ensure alignment
2162 ##############################################################
2163 # Stack layout
2164 # +0 -num
2165 # +8 off-loaded &b[i]
2166 # +16 end of b[num]
2167 # +24 inner counter
2168 # +32 saved n0
2169 # +40 saved %rsp
2170 # +48
2171 # +56 saved rp
2172 # +64 tmp[num+1]
2173 #
2174 mov $n0, 32(%rsp) # save *n0
2175 mov %rax,40(%rsp) # save original %rsp
2176.Lmulx4x_body:
2177 call mulx4x_internal
2178
2179 mov 40(%rsp),%rsi # restore %rsp
2180 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04002181
Adam Langleyd9e397b2015-01-22 14:27:53 -08002182 mov -48(%rsi),%r15
2183 mov -40(%rsi),%r14
2184 mov -32(%rsi),%r13
2185 mov -24(%rsi),%r12
2186 mov -16(%rsi),%rbp
2187 mov -8(%rsi),%rbx
2188 lea (%rsi),%rsp
2189.Lmulx4x_epilogue:
2190 ret
2191.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2192
2193.type mulx4x_internal,\@abi-omnipotent
2194.align 32
2195mulx4x_internal:
David Benjamin4969cc92016-04-22 15:02:23 -04002196 mov $num,8(%rsp) # save -$num (it was in bytes)
2197 mov $num,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002198 neg $num # restore $num
2199 shl \$5,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002200 neg %r10 # restore $num
2201 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002202 shr \$5+5,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002203 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
Adam Langleyd9e397b2015-01-22 14:27:53 -08002204 sub \$1,$num
David Benjamin4969cc92016-04-22 15:02:23 -04002205 lea .Linc(%rip),%rax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002206 mov %r13,16+8(%rsp) # end of b[num]
2207 mov $num,24+8(%rsp) # inner counter
2208 mov $rp, 56+8(%rsp) # save $rp
2209___
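# (Illustrative note, not part of the original: this path relies on the
# BMI2/ADX idiom of two independent carry chains.  MULX produces a 128-bit
# product without touching flags, ADCX propagates carries through CF only
# and ADOX through OF only, so multiplication and accumulation can be
# interleaved without saving flags, roughly:
#
#	mulx	src, lo, hi	# hi:lo = rdx*src, flags untouched
#	adcx	lo, acc0	# carry chain 1, via CF
#	adox	hi, acc1	# carry chain 2, via OF
# )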
2210my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2211 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2212my $rptr=$bptr;
2213my $STRIDE=2**5*8; # 5 is "window size"
2214my $N=$STRIDE/4; # should match cache line size
2215$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04002216 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2217 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2218	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
2219 lea 128($bp),$bptr # size optimization
Adam Langleyd9e397b2015-01-22 14:27:53 -08002220
David Benjamin4969cc92016-04-22 15:02:23 -04002221 pshufd \$0,%xmm5,%xmm5 # broadcast index
2222 movdqa %xmm1,%xmm4
2223 .byte 0x67
2224 movdqa %xmm1,%xmm2
2225___
2226########################################################################
2227# calculate mask by comparing 0..31 to index and save result to stack
2228#
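# (Illustrative C sketch of the effect; 'table', 'idx' and 'acc' are
# hypothetical names.  Every one of the 32 entries is read and masked, so
# the memory access pattern is independent of the secret index:
#
#	BN_ULONG acc = 0;
#	for (int j = 0; j < 32; j++) {
#		BN_ULONG mask = (j == idx) ? ~(BN_ULONG)0 : 0;	/* pcmpeqd */
#		acc |= table[j] & mask;		/* only the match survives */
#	}
# )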
2229$code.=<<___;
2230 .byte 0x67
2231 paddd %xmm0,%xmm1
2232 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2233 movdqa %xmm4,%xmm3
2234___
2235for($i=0;$i<$STRIDE/16-4;$i+=4) {
2236$code.=<<___;
2237 paddd %xmm1,%xmm2
2238 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2239 movdqa %xmm0,`16*($i+0)+112`(%r10)
2240 movdqa %xmm4,%xmm0
2241
2242 paddd %xmm2,%xmm3
2243 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2244 movdqa %xmm1,`16*($i+1)+112`(%r10)
2245 movdqa %xmm4,%xmm1
2246
2247 paddd %xmm3,%xmm0
2248 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2249 movdqa %xmm2,`16*($i+2)+112`(%r10)
2250 movdqa %xmm4,%xmm2
2251
2252 paddd %xmm0,%xmm1
2253 pcmpeqd %xmm5,%xmm0
2254 movdqa %xmm3,`16*($i+3)+112`(%r10)
2255 movdqa %xmm4,%xmm3
2256___
2257}
2258$code.=<<___; # last iteration can be optimized
2259 .byte 0x67
2260 paddd %xmm1,%xmm2
2261 pcmpeqd %xmm5,%xmm1
2262 movdqa %xmm0,`16*($i+0)+112`(%r10)
2263
2264 paddd %xmm2,%xmm3
2265 pcmpeqd %xmm5,%xmm2
2266 movdqa %xmm1,`16*($i+1)+112`(%r10)
2267
2268 pcmpeqd %xmm5,%xmm3
2269 movdqa %xmm2,`16*($i+2)+112`(%r10)
2270
2271 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2272 pand `16*($i+1)-128`($bptr),%xmm1
2273 pand `16*($i+2)-128`($bptr),%xmm2
2274 movdqa %xmm3,`16*($i+3)+112`(%r10)
2275 pand `16*($i+3)-128`($bptr),%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -08002276 por %xmm2,%xmm0
David Benjamin4969cc92016-04-22 15:02:23 -04002277 por %xmm3,%xmm1
2278___
2279for($i=0;$i<$STRIDE/16-4;$i+=4) {
2280$code.=<<___;
2281 movdqa `16*($i+0)-128`($bptr),%xmm4
2282 movdqa `16*($i+1)-128`($bptr),%xmm5
2283 movdqa `16*($i+2)-128`($bptr),%xmm2
2284 pand `16*($i+0)+112`(%r10),%xmm4
2285 movdqa `16*($i+3)-128`($bptr),%xmm3
2286 pand `16*($i+1)+112`(%r10),%xmm5
2287 por %xmm4,%xmm0
2288 pand `16*($i+2)+112`(%r10),%xmm2
2289 por %xmm5,%xmm1
2290 pand `16*($i+3)+112`(%r10),%xmm3
2291 por %xmm2,%xmm0
2292 por %xmm3,%xmm1
2293___
2294}
2295$code.=<<___;
2296 pxor %xmm1,%xmm0
2297 pshufd \$0x4e,%xmm0,%xmm1
2298 por %xmm1,%xmm0
2299 lea $STRIDE($bptr),$bptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002300 movq %xmm0,%rdx # bp[0]
David Benjamin4969cc92016-04-22 15:02:23 -04002301 lea 64+8*4+8(%rsp),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002302
2303 mov %rdx,$bi
2304 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2305 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2306 add %rax,%r11
2307 mulx 2*8($aptr),%rax,%r13 # ...
2308 adc %rax,%r12
2309 adc \$0,%r13
2310 mulx 3*8($aptr),%rax,%r14
2311
2312 mov $mi,%r15
2313 imulq 32+8(%rsp),$mi # "t[0]"*n0
2314 xor $zero,$zero # cf=0, of=0
2315 mov $mi,%rdx
2316
Adam Langleyd9e397b2015-01-22 14:27:53 -08002317 mov $bptr,8+8(%rsp) # off-load &b[i]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002318
David Benjamin4969cc92016-04-22 15:02:23 -04002319 lea 4*8($aptr),$aptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002320 adcx %rax,%r13
2321 adcx $zero,%r14 # cf=0
2322
David Benjamin4969cc92016-04-22 15:02:23 -04002323 mulx 0*8($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002324 adcx %rax,%r15 # discarded
2325 adox %r11,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002326 mulx 1*8($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002327 adcx %rax,%r10
2328 adox %r12,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002329 mulx 2*8($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002330 mov 24+8(%rsp),$bptr # counter value
Adam Langleyd9e397b2015-01-22 14:27:53 -08002331 mov %r10,-8*4($tptr)
2332 adcx %rax,%r11
2333 adox %r13,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002334 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002335 mov $bi,%rdx
2336 mov %r11,-8*3($tptr)
2337 adcx %rax,%r12
2338 adox $zero,%r15 # of=0
David Benjamin4969cc92016-04-22 15:02:23 -04002339 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002340 mov %r12,-8*2($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002341 jmp .Lmulx4x_1st
Adam Langleyd9e397b2015-01-22 14:27:53 -08002342
2343.align 32
2344.Lmulx4x_1st:
2345 adcx $zero,%r15 # cf=0, modulo-scheduled
2346 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2347 adcx %r14,%r10
2348 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2349 adcx %rax,%r11
2350 mulx 2*8($aptr),%r12,%rax # ...
2351 adcx %r14,%r12
2352 mulx 3*8($aptr),%r13,%r14
2353 .byte 0x67,0x67
2354 mov $mi,%rdx
2355 adcx %rax,%r13
2356 adcx $zero,%r14 # cf=0
2357 lea 4*8($aptr),$aptr
2358 lea 4*8($tptr),$tptr
2359
2360 adox %r15,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002361 mulx 0*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002362 adcx %rax,%r10
2363 adox %r15,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002364 mulx 1*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002365 adcx %rax,%r11
2366 adox %r15,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002367 mulx 2*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002368 mov %r10,-5*8($tptr)
2369 adcx %rax,%r12
2370 mov %r11,-4*8($tptr)
2371 adox %r15,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04002372 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002373 mov $bi,%rdx
2374 mov %r12,-3*8($tptr)
2375 adcx %rax,%r13
2376 adox $zero,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04002377 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002378 mov %r13,-2*8($tptr)
2379
2380 dec $bptr # of=0, pass cf
2381 jnz .Lmulx4x_1st
2382
2383 mov 8(%rsp),$num # load -num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002384 adc $zero,%r15 # modulo-scheduled
2385 lea ($aptr,$num),$aptr # rewind $aptr
2386 add %r15,%r14
2387 mov 8+8(%rsp),$bptr # re-load &b[i]
2388 adc $zero,$zero # top-most carry
2389 mov %r14,-1*8($tptr)
2390 jmp .Lmulx4x_outer
2391
2392.align 32
2393.Lmulx4x_outer:
David Benjamin4969cc92016-04-22 15:02:23 -04002394 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2395 pxor %xmm4,%xmm4
2396 .byte 0x67,0x67
2397 pxor %xmm5,%xmm5
2398___
2399for($i=0;$i<$STRIDE/16;$i+=4) {
2400$code.=<<___;
2401 movdqa `16*($i+0)-128`($bptr),%xmm0
2402 movdqa `16*($i+1)-128`($bptr),%xmm1
2403 movdqa `16*($i+2)-128`($bptr),%xmm2
2404 pand `16*($i+0)+256`(%r10),%xmm0
2405 movdqa `16*($i+3)-128`($bptr),%xmm3
2406 pand `16*($i+1)+256`(%r10),%xmm1
2407 por %xmm0,%xmm4
2408 pand `16*($i+2)+256`(%r10),%xmm2
2409 por %xmm1,%xmm5
2410 pand `16*($i+3)+256`(%r10),%xmm3
2411 por %xmm2,%xmm4
2412 por %xmm3,%xmm5
2413___
2414}
2415$code.=<<___;
2416 por %xmm5,%xmm4
2417 pshufd \$0x4e,%xmm4,%xmm0
2418 por %xmm4,%xmm0
2419 lea $STRIDE($bptr),$bptr
2420 movq %xmm0,%rdx # m0=bp[i]
2421
Adam Langleyd9e397b2015-01-22 14:27:53 -08002422 mov $zero,($tptr) # save top-most carry
2423 lea 4*8($tptr,$num),$tptr # rewind $tptr
2424 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2425 xor $zero,$zero # cf=0, of=0
2426 mov %rdx,$bi
2427 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2428 adox -4*8($tptr),$mi # +t[0]
2429 adcx %r14,%r11
2430 mulx 2*8($aptr),%r15,%r13 # ...
2431 adox -3*8($tptr),%r11
2432 adcx %r15,%r12
2433 mulx 3*8($aptr),%rdx,%r14
2434 adox -2*8($tptr),%r12
2435 adcx %rdx,%r13
David Benjamin4969cc92016-04-22 15:02:23 -04002436 lea ($nptr,$num),$nptr # rewind $nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002437 lea 4*8($aptr),$aptr
2438 adox -1*8($tptr),%r13
2439 adcx $zero,%r14
2440 adox $zero,%r14
2441
Adam Langleyd9e397b2015-01-22 14:27:53 -08002442 mov $mi,%r15
2443 imulq 32+8(%rsp),$mi # "t[0]"*n0
2444
Adam Langleyd9e397b2015-01-22 14:27:53 -08002445 mov $mi,%rdx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002446 xor $zero,$zero # cf=0, of=0
2447 mov $bptr,8+8(%rsp) # off-load &b[i]
2448
David Benjamin4969cc92016-04-22 15:02:23 -04002449 mulx 0*8($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08002450 adcx %rax,%r15 # discarded
2451 adox %r11,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002452 mulx 1*8($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002453 adcx %rax,%r10
2454 adox %r12,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002455 mulx 2*8($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08002456 adcx %rax,%r11
2457 adox %r13,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002458 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002459 mov $bi,%rdx
Adam Langleyd9e397b2015-01-22 14:27:53 -08002460 mov 24+8(%rsp),$bptr # counter value
2461 mov %r10,-8*4($tptr)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002462 adcx %rax,%r12
2463 mov %r11,-8*3($tptr)
2464 adox $zero,%r15 # of=0
2465 mov %r12,-8*2($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002466 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002467 jmp .Lmulx4x_inner
2468
2469.align 32
2470.Lmulx4x_inner:
2471 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2472 adcx $zero,%r15 # cf=0, modulo-scheduled
2473 adox %r14,%r10
2474 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2475 adcx 0*8($tptr),%r10
2476 adox %rax,%r11
2477 mulx 2*8($aptr),%r12,%rax # ...
2478 adcx 1*8($tptr),%r11
2479 adox %r14,%r12
2480 mulx 3*8($aptr),%r13,%r14
2481 mov $mi,%rdx
2482 adcx 2*8($tptr),%r12
2483 adox %rax,%r13
2484 adcx 3*8($tptr),%r13
2485 adox $zero,%r14 # of=0
2486 lea 4*8($aptr),$aptr
2487 lea 4*8($tptr),$tptr
2488 adcx $zero,%r14 # cf=0
2489
2490 adox %r15,%r10
David Benjamin4969cc92016-04-22 15:02:23 -04002491 mulx 0*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002492 adcx %rax,%r10
2493 adox %r15,%r11
David Benjamin4969cc92016-04-22 15:02:23 -04002494 mulx 1*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002495 adcx %rax,%r11
2496 adox %r15,%r12
David Benjamin4969cc92016-04-22 15:02:23 -04002497 mulx 2*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002498 mov %r10,-5*8($tptr)
2499 adcx %rax,%r12
2500 adox %r15,%r13
2501 mov %r11,-4*8($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04002502 mulx 3*8($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08002503 mov $bi,%rdx
David Benjamin4969cc92016-04-22 15:02:23 -04002504 lea 4*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08002505 mov %r12,-3*8($tptr)
2506 adcx %rax,%r13
2507 adox $zero,%r15
2508 mov %r13,-2*8($tptr)
2509
2510 dec $bptr # of=0, pass cf
2511 jnz .Lmulx4x_inner
2512
2513 mov 0+8(%rsp),$num # load -num
Adam Langleyd9e397b2015-01-22 14:27:53 -08002514 adc $zero,%r15 # modulo-scheduled
2515 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2516 mov 8+8(%rsp),$bptr # re-load &b[i]
2517 mov 16+8(%rsp),%r10
2518 adc %r15,%r14
2519 lea ($aptr,$num),$aptr # rewind $aptr
2520 adc $zero,$zero # top-most carry
2521 mov %r14,-1*8($tptr)
2522
2523 cmp %r10,$bptr
2524 jb .Lmulx4x_outer
2525
David Benjamin4969cc92016-04-22 15:02:23 -04002526 mov -8($nptr),%r10
2527 mov $zero,%r8
2528 mov ($nptr,$num),%r12
2529 lea ($nptr,$num),%rbp # rewind $nptr
2530 mov $num,%rcx
2531 lea ($tptr,$num),%rdi # rewind $tptr
2532 xor %eax,%eax
Adam Langleyd9e397b2015-01-22 14:27:53 -08002533 xor %r15,%r15
2534 sub %r14,%r10 # compare top-most words
2535 adc %r15,%r15
David Benjamin4969cc92016-04-22 15:02:23 -04002536 or %r15,%r8
2537 sar \$3+2,%rcx
2538 sub %r8,%rax # %rax=-%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08002539 mov 56+8(%rsp),%rdx # restore rp
David Benjamin4969cc92016-04-22 15:02:23 -04002540 dec %r12 # so that after 'not' we get -n[0]
2541 mov 8*1(%rbp),%r13
2542 xor %r8,%r8
2543 mov 8*2(%rbp),%r14
2544 mov 8*3(%rbp),%r15
2545 jmp .Lsqrx4x_sub_entry # common post-condition
Adam Langleyd9e397b2015-01-22 14:27:53 -08002546.size mulx4x_internal,.-mulx4x_internal
2547___
2548} {
2549######################################################################
2550# void bn_powerx5(
2551my $rptr="%rdi"; # BN_ULONG *rptr,
2552my $aptr="%rsi"; # const BN_ULONG *aptr,
2553my $bptr="%rdx"; # const void *table,
2554my $nptr="%rcx"; # const BN_ULONG *nptr,
2555my $n0 ="%r8"; # const BN_ULONG *n0);
2556my $num ="%r9"; # int num, has to be divisible by 8
2557 # int pwr);
2558
2559my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2560my @A0=("%r10","%r11");
2561my @A1=("%r12","%r13");
2562my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2563
2564$code.=<<___;
2565.type bn_powerx5,\@function,6
2566.align 32
2567bn_powerx5:
2568.Lpowerx5_enter:
Adam Langleyd9e397b2015-01-22 14:27:53 -08002569 mov %rsp,%rax
2570 push %rbx
2571 push %rbp
2572 push %r12
2573 push %r13
2574 push %r14
2575 push %r15
David Benjamin4969cc92016-04-22 15:02:23 -04002576
Adam Langleyd9e397b2015-01-22 14:27:53 -08002577 shl \$3,${num}d # convert $num to bytes
David Benjamin4969cc92016-04-22 15:02:23 -04002578 lea ($num,$num,2),%r10 # 3*$num in bytes
Adam Langleyd9e397b2015-01-22 14:27:53 -08002579 neg $num
2580 mov ($n0),$n0 # *n0
2581
2582 ##############################################################
David Benjamin4969cc92016-04-22 15:02:23 -04002583 # Ensure that stack frame doesn't alias with $rptr+3*$num
2584 # modulo 4096, which covers ret[num], am[num] and n[num]
2585 # (see bn_exp.c). This is done to allow memory disambiguation
2586	# logic to do its magic. [An extra 256 bytes is for the power mask
2587	# calculated from the 7th argument, the index.]
Adam Langleyd9e397b2015-01-22 14:27:53 -08002588 #
David Benjamin4969cc92016-04-22 15:02:23 -04002589 lea -320(%rsp,$num,2),%r11
2590 sub $rptr,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08002591 and \$4095,%r11
2592 cmp %r11,%r10
2593 jb .Lpwrx_sp_alt
2594 sub %r11,%rsp # align with $aptr
David Benjamin4969cc92016-04-22 15:02:23 -04002595 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002596 jmp .Lpwrx_sp_done
2597
2598.align 32
2599.Lpwrx_sp_alt:
David Benjamin4969cc92016-04-22 15:02:23 -04002600 lea 4096-320(,$num,2),%r10
2601 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
Adam Langleyd9e397b2015-01-22 14:27:53 -08002602 sub %r10,%r11
2603 mov \$0,%r10
2604 cmovc %r10,%r11
2605 sub %r11,%rsp
2606.Lpwrx_sp_done:
2607 and \$-64,%rsp
2608 mov $num,%r10
2609 neg $num
2610
2611 ##############################################################
2612 # Stack layout
2613 #
2614 # +0 saved $num, used in reduction section
2615 # +8 &t[2*$num], used in reduction section
2616 # +16 intermediate carry bit
2617 # +24 top-most carry bit, used in reduction section
2618 # +32 saved *n0
2619 # +40 saved %rsp
2620 # +48 t[2*$num]
2621 #
2622 pxor %xmm0,%xmm0
2623 movq $rptr,%xmm1 # save $rptr
2624 movq $nptr,%xmm2 # save $nptr
2625 movq %r10, %xmm3 # -$num
2626 movq $bptr,%xmm4
2627 mov $n0, 32(%rsp)
2628 mov %rax, 40(%rsp) # save original %rsp
2629.Lpowerx5_body:
2630
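	# (Illustrative note, not from the original: the five back-to-back
	# square+reduce+post rounds below raise the accumulator to its 32nd
	# power, one squaring per bit of the 5-bit window; mulx4x_internal
	# then multiplies in the power gathered from the table.)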
2631 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002632 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002633 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002634 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002635 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002636 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002637 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002638 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002639 call __bn_sqrx8x_internal
David Benjamin4969cc92016-04-22 15:02:23 -04002640 call __bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08002641
2642 mov %r10,$num # -num
2643 mov $aptr,$rptr
2644 movq %xmm2,$nptr
2645 movq %xmm4,$bptr
2646 mov 40(%rsp),%rax
2647
2648 call mulx4x_internal
2649
2650 mov 40(%rsp),%rsi # restore %rsp
2651 mov \$1,%rax
David Benjamin4969cc92016-04-22 15:02:23 -04002652
Adam Langleyd9e397b2015-01-22 14:27:53 -08002653 mov -48(%rsi),%r15
2654 mov -40(%rsi),%r14
2655 mov -32(%rsi),%r13
2656 mov -24(%rsi),%r12
2657 mov -16(%rsi),%rbp
2658 mov -8(%rsi),%rbx
2659 lea (%rsi),%rsp
2660.Lpowerx5_epilogue:
2661 ret
2662.size bn_powerx5,.-bn_powerx5
2663
2664.globl bn_sqrx8x_internal
2665.hidden bn_sqrx8x_internal
2666.type bn_sqrx8x_internal,\@abi-omnipotent
2667.align 32
2668bn_sqrx8x_internal:
2669__bn_sqrx8x_internal:
2670 ##################################################################
2671 # Squaring part:
2672 #
2673 # a) multiply-n-add everything but a[i]*a[i];
2674 # b) shift result of a) by 1 to the left and accumulate
2675 # a[i]*a[i] products;
2676 #
2677 ##################################################################
2678 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2679 # a[1]a[0]
2680 # a[2]a[0]
2681 # a[3]a[0]
2682 # a[2]a[1]
2683 # a[3]a[1]
2684 # a[3]a[2]
2685 #
2686 # a[4]a[0]
2687 # a[5]a[0]
2688 # a[6]a[0]
2689 # a[7]a[0]
2690 # a[4]a[1]
2691 # a[5]a[1]
2692 # a[6]a[1]
2693 # a[7]a[1]
2694 # a[4]a[2]
2695 # a[5]a[2]
2696 # a[6]a[2]
2697 # a[7]a[2]
2698 # a[4]a[3]
2699 # a[5]a[3]
2700 # a[6]a[3]
2701 # a[7]a[3]
2702 #
2703 # a[5]a[4]
2704 # a[6]a[4]
2705 # a[7]a[4]
2706 # a[6]a[5]
2707 # a[7]a[5]
2708 # a[7]a[6]
2709 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2710___
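# (Illustrative summary, not part of the original: the schedule above is
# the standard squaring shortcut,
#
#	t = sum over i<j of a[i]*a[j]		# each cross product once
#	t = 2*t + sum over i of a[i]^2		# shift left 1, add diagonal
#
# which is what steps a) and b) in the comment above refer to.)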
2711{
2712my ($zero,$carry)=("%rbp","%rcx");
2713my $aaptr=$zero;
2714$code.=<<___;
2715 lea 48+8(%rsp),$tptr
2716 lea ($aptr,$num),$aaptr
2717 mov $num,0+8(%rsp) # save $num
2718 mov $aaptr,8+8(%rsp) # save end of $aptr
2719 jmp .Lsqr8x_zero_start
2720
2721.align 32
2722.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2723.Lsqrx8x_zero:
2724 .byte 0x3e
2725 movdqa %xmm0,0*8($tptr)
2726 movdqa %xmm0,2*8($tptr)
2727 movdqa %xmm0,4*8($tptr)
2728 movdqa %xmm0,6*8($tptr)
2729.Lsqr8x_zero_start: # aligned at 32
2730 movdqa %xmm0,8*8($tptr)
2731 movdqa %xmm0,10*8($tptr)
2732 movdqa %xmm0,12*8($tptr)
2733 movdqa %xmm0,14*8($tptr)
2734 lea 16*8($tptr),$tptr
2735 sub \$64,$num
2736 jnz .Lsqrx8x_zero
2737
2738 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2739 #xor %r9,%r9 # t[1], ex-$num, zero already
2740 xor %r10,%r10
2741 xor %r11,%r11
2742 xor %r12,%r12
2743 xor %r13,%r13
2744 xor %r14,%r14
2745 xor %r15,%r15
2746 lea 48+8(%rsp),$tptr
2747	xor	$zero,$zero		# cf=0, of=0
2748 jmp .Lsqrx8x_outer_loop
2749
2750.align 32
2751.Lsqrx8x_outer_loop:
2752 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2753 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2754 adox %rax,%r10
2755 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2756 adcx %r10,%r9
2757 adox %rax,%r11
2758 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2759 adcx %r11,%r10
2760 adox %rax,%r12
2761 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2762 adcx %r12,%r11
2763 adox %rax,%r13
2764 mulx 5*8($aptr),%r12,%rax
2765 adcx %r13,%r12
2766 adox %rax,%r14
2767 mulx 6*8($aptr),%r13,%rax
2768 adcx %r14,%r13
2769 adox %r15,%rax
2770 mulx 7*8($aptr),%r14,%r15
2771 mov 1*8($aptr),%rdx # a[1]
2772 adcx %rax,%r14
2773 adox $zero,%r15
2774 adc 8*8($tptr),%r15
2775 mov %r8,1*8($tptr) # t[1]
2776 mov %r9,2*8($tptr) # t[2]
2777 sbb $carry,$carry # mov %cf,$carry
2778 xor $zero,$zero # cf=0, of=0
2779
2780
2781 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2782 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2783 adcx %r10,%r8
2784 adox %rbx,%r9
2785 mulx 4*8($aptr),%r10,%rbx # ...
2786 adcx %r11,%r9
2787 adox %rax,%r10
2788 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2789 adcx %r12,%r10
2790 adox %rbx,%r11
2791 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2792 adcx %r13,%r11
2793 adox %r14,%r12
2794 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2795 mov 2*8($aptr),%rdx # a[2]
2796 adcx %rax,%r12
2797 adox %rbx,%r13
2798 adcx %r15,%r13
2799 adox $zero,%r14 # of=0
2800 adcx $zero,%r14 # cf=0
2801
2802 mov %r8,3*8($tptr) # t[3]
2803 mov %r9,4*8($tptr) # t[4]
2804
2805 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2806 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2807 adcx %r10,%r8
2808 adox %rbx,%r9
2809 mulx 5*8($aptr),%r10,%rbx # ...
2810 adcx %r11,%r9
2811 adox %rax,%r10
2812 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2813 adcx %r12,%r10
2814 adox %r13,%r11
2815 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2816 .byte 0x3e
2817 mov 3*8($aptr),%rdx # a[3]
2818 adcx %rbx,%r11
2819 adox %rax,%r12
2820 adcx %r14,%r12
2821 mov %r8,5*8($tptr) # t[5]
2822 mov %r9,6*8($tptr) # t[6]
2823 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2824 adox $zero,%r13 # of=0
2825 adcx $zero,%r13 # cf=0
2826
2827 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2828 adcx %r10,%r8
2829 adox %rax,%r9
2830 mulx 6*8($aptr),%r10,%rax # ...
2831 adcx %r11,%r9
2832 adox %r12,%r10
2833 mulx 7*8($aptr),%r11,%r12
2834 mov 4*8($aptr),%rdx # a[4]
2835 mov 5*8($aptr),%r14 # a[5]
2836 adcx %rbx,%r10
2837 adox %rax,%r11
2838 mov 6*8($aptr),%r15 # a[6]
2839 adcx %r13,%r11
2840 adox $zero,%r12 # of=0
2841 adcx $zero,%r12 # cf=0
2842
2843 mov %r8,7*8($tptr) # t[7]
2844 mov %r9,8*8($tptr) # t[8]
2845
2846 mulx %r14,%r9,%rax # a[5]*a[4]
2847 mov 7*8($aptr),%r8 # a[7]
2848 adcx %r10,%r9
2849 mulx %r15,%r10,%rbx # a[6]*a[4]
2850 adox %rax,%r10
2851 adcx %r11,%r10
2852 mulx %r8,%r11,%rax # a[7]*a[4]
2853 mov %r14,%rdx # a[5]
2854 adox %rbx,%r11
2855 adcx %r12,%r11
2856 #adox $zero,%rax # of=0
2857 adcx $zero,%rax # cf=0
2858
2859 mulx %r15,%r14,%rbx # a[6]*a[5]
2860 mulx %r8,%r12,%r13 # a[7]*a[5]
2861 mov %r15,%rdx # a[6]
2862 lea 8*8($aptr),$aptr
2863 adcx %r14,%r11
2864 adox %rbx,%r12
2865 adcx %rax,%r12
2866 adox $zero,%r13
2867
2868 .byte 0x67,0x67
2869 mulx %r8,%r8,%r14 # a[7]*a[6]
2870 adcx %r8,%r13
2871 adcx $zero,%r14
2872
2873 cmp 8+8(%rsp),$aptr
2874 je .Lsqrx8x_outer_break
2875
2876 neg $carry # mov $carry,%cf
2877 mov \$-8,%rcx
2878 mov $zero,%r15
2879 mov 8*8($tptr),%r8
2880 adcx 9*8($tptr),%r9 # +=t[9]
2881 adcx 10*8($tptr),%r10 # ...
2882 adcx 11*8($tptr),%r11
2883 adc 12*8($tptr),%r12
2884 adc 13*8($tptr),%r13
2885 adc 14*8($tptr),%r14
2886 adc 15*8($tptr),%r15
2887 lea ($aptr),$aaptr
2888 lea 2*64($tptr),$tptr
2889 sbb %rax,%rax # mov %cf,$carry
2890
2891 mov -64($aptr),%rdx # a[0]
2892 mov %rax,16+8(%rsp) # offload $carry
2893 mov $tptr,24+8(%rsp)
2894
2895 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2896 xor %eax,%eax # cf=0, of=0
2897 jmp .Lsqrx8x_loop
2898
2899.align 32
2900.Lsqrx8x_loop:
2901 mov %r8,%rbx
2902 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2903 adcx %rax,%rbx # +=t[8]
2904 adox %r9,%r8
2905
2906 mulx 1*8($aaptr),%rax,%r9 # ...
2907 adcx %rax,%r8
2908 adox %r10,%r9
2909
2910 mulx 2*8($aaptr),%rax,%r10
2911 adcx %rax,%r9
2912 adox %r11,%r10
2913
2914 mulx 3*8($aaptr),%rax,%r11
2915 adcx %rax,%r10
2916 adox %r12,%r11
2917
2918 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2919 adcx %rax,%r11
2920 adox %r13,%r12
2921
2922 mulx 5*8($aaptr),%rax,%r13
2923 adcx %rax,%r12
2924 adox %r14,%r13
2925
2926 mulx 6*8($aaptr),%rax,%r14
2927 mov %rbx,($tptr,%rcx,8) # store t[8+i]
2928 mov \$0,%ebx
2929 adcx %rax,%r13
2930 adox %r15,%r14
2931
2932 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
2933 mov 8($aptr,%rcx,8),%rdx # a[i]
2934 adcx %rax,%r14
2935 adox %rbx,%r15 # %rbx is 0, of=0
2936 adcx %rbx,%r15 # cf=0
2937
2938 .byte 0x67
2939 inc %rcx # of=0
2940 jnz .Lsqrx8x_loop
2941
2942 lea 8*8($aaptr),$aaptr
2943 mov \$-8,%rcx
2944 cmp 8+8(%rsp),$aaptr # done?
2945 je .Lsqrx8x_break
2946
2947 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
2948 .byte 0x66
2949 mov -64($aptr),%rdx
2950 adcx 0*8($tptr),%r8
2951 adcx 1*8($tptr),%r9
2952 adc 2*8($tptr),%r10
2953 adc 3*8($tptr),%r11
2954 adc 4*8($tptr),%r12
2955 adc 5*8($tptr),%r13
2956 adc 6*8($tptr),%r14
2957 adc 7*8($tptr),%r15
2958 lea 8*8($tptr),$tptr
2959 .byte 0x67
2960 sbb %rax,%rax # mov %cf,%rax
2961 xor %ebx,%ebx # cf=0, of=0
2962 mov %rax,16+8(%rsp) # offload carry
2963 jmp .Lsqrx8x_loop
2964
2965.align 32
2966.Lsqrx8x_break:
2967 sub 16+8(%rsp),%r8 # consume last carry
2968 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
2969 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
2970 xor %ebp,%ebp # xor $zero,$zero
2971 mov %r8,0*8($tptr)
2972 cmp $carry,$tptr # cf=0, of=0
2973 je .Lsqrx8x_outer_loop
2974
2975 mov %r9,1*8($tptr)
2976 mov 1*8($carry),%r9
2977 mov %r10,2*8($tptr)
2978 mov 2*8($carry),%r10
2979 mov %r11,3*8($tptr)
2980 mov 3*8($carry),%r11
2981 mov %r12,4*8($tptr)
2982 mov 4*8($carry),%r12
2983 mov %r13,5*8($tptr)
2984 mov 5*8($carry),%r13
2985 mov %r14,6*8($tptr)
2986 mov 6*8($carry),%r14
2987 mov %r15,7*8($tptr)
2988 mov 7*8($carry),%r15
2989 mov $carry,$tptr
2990 jmp .Lsqrx8x_outer_loop
2991
2992.align 32
2993.Lsqrx8x_outer_break:
2994 mov %r9,9*8($tptr) # t[9]
2995 movq %xmm3,%rcx # -$num
2996 mov %r10,10*8($tptr) # ...
2997 mov %r11,11*8($tptr)
2998 mov %r12,12*8($tptr)
2999 mov %r13,13*8($tptr)
3000 mov %r14,14*8($tptr)
3001___
3002} {
3003my $i="%rcx";
3004$code.=<<___;
3005 lea 48+8(%rsp),$tptr
3006 mov ($aptr,$i),%rdx # a[0]
3007
3008 mov 8($tptr),$A0[1] # t[1]
3009 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3010 mov 0+8(%rsp),$num # restore $num
3011 adox $A0[1],$A0[1]
3012 mov 16($tptr),$A1[0] # t[2] # prefetch
3013 mov 24($tptr),$A1[1] # t[3] # prefetch
3014 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3015
3016.align 32
3017.Lsqrx4x_shift_n_add:
3018 mulx %rdx,%rax,%rbx
3019 adox $A1[0],$A1[0]
3020 adcx $A0[0],%rax
3021 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3022 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3023 adox $A1[1],$A1[1]
3024 adcx $A0[1],%rbx
3025 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3026 mov %rax,0($tptr)
3027 mov %rbx,8($tptr)
3028
3029 mulx %rdx,%rax,%rbx
3030 adox $A0[0],$A0[0]
3031 adcx $A1[0],%rax
3032 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3033 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3034 adox $A0[1],$A0[1]
3035 adcx $A1[1],%rbx
3036 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3037 mov %rax,16($tptr)
3038 mov %rbx,24($tptr)
3039
3040 mulx %rdx,%rax,%rbx
3041 adox $A1[0],$A1[0]
3042 adcx $A0[0],%rax
3043 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3044 lea 32($i),$i
3045 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3046 adox $A1[1],$A1[1]
3047 adcx $A0[1],%rbx
3048 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3049 mov %rax,32($tptr)
3050 mov %rbx,40($tptr)
3051
3052 mulx %rdx,%rax,%rbx
3053 adox $A0[0],$A0[0]
3054 adcx $A1[0],%rax
3055 jrcxz .Lsqrx4x_shift_n_add_break
3056 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3057 adox $A0[1],$A0[1]
3058 adcx $A1[1],%rbx
3059 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3060 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3061 mov %rax,48($tptr)
3062 mov %rbx,56($tptr)
3063 lea 64($tptr),$tptr
3064 nop
3065 jmp .Lsqrx4x_shift_n_add
3066
3067.align 32
3068.Lsqrx4x_shift_n_add_break:
3069 adcx $A1[1],%rbx
3070 mov %rax,48($tptr)
3071 mov %rbx,56($tptr)
3072 lea 64($tptr),$tptr # end of t[] buffer
3073___
3074}
3075######################################################################
3076# Montgomery reduction part, "word-by-word" algorithm.
3077#
3078# This new path is inspired by multiple submissions from Intel, by
3079# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3080# Vinodh Gopal...
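# (Illustrative C-level model of the reduction, not the exact code below;
# n0 is the usual -n^-1 mod 2^64 constant:
#
#	for (i = 0; i < num; i++) {
#		m = t[i] * n0;			/* mod 2^64		*/
#		t[i..i+num] += m * n;		/* forces t[i] to zero	*/
#	}
#	/* result = upper num words of t[] plus the final carry,
#	   conditionally reduced by n in the post-condition */
# )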
3081{
3082my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3083
3084$code.=<<___;
3085 movq %xmm2,$nptr
David Benjamin4969cc92016-04-22 15:02:23 -04003086__bn_sqrx8x_reduction:
Adam Langleyd9e397b2015-01-22 14:27:53 -08003087 xor %eax,%eax # initial top-most carry bit
3088 mov 32+8(%rsp),%rbx # n0
3089 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003090 lea -8*8($nptr,$num),%rcx # end of n[]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003091 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3092 mov %rcx, 0+8(%rsp) # save end of n[]
3093 mov $tptr,8+8(%rsp) # save end of t[]
3094
3095 lea 48+8(%rsp),$tptr # initial t[] window
3096 jmp .Lsqrx8x_reduction_loop
3097
3098.align 32
3099.Lsqrx8x_reduction_loop:
3100 mov 8*1($tptr),%r9
3101 mov 8*2($tptr),%r10
3102 mov 8*3($tptr),%r11
3103 mov 8*4($tptr),%r12
3104 mov %rdx,%r8
3105 imulq %rbx,%rdx # n0*a[i]
3106 mov 8*5($tptr),%r13
3107 mov 8*6($tptr),%r14
3108 mov 8*7($tptr),%r15
3109 mov %rax,24+8(%rsp) # store top-most carry bit
3110
3111 lea 8*8($tptr),$tptr
3112 xor $carry,$carry # cf=0,of=0
3113 mov \$-8,%rcx
3114 jmp .Lsqrx8x_reduce
3115
3116.align 32
3117.Lsqrx8x_reduce:
3118 mov %r8, %rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003119 mulx 8*0($nptr),%rax,%r8 # n[0]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003120 adcx %rbx,%rax # discarded
3121 adox %r9,%r8
3122
David Benjamin4969cc92016-04-22 15:02:23 -04003123 mulx 8*1($nptr),%rbx,%r9 # n[1]
Adam Langleyd9e397b2015-01-22 14:27:53 -08003124 adcx %rbx,%r8
3125 adox %r10,%r9
3126
David Benjamin4969cc92016-04-22 15:02:23 -04003127 mulx 8*2($nptr),%rbx,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08003128 adcx %rbx,%r9
3129 adox %r11,%r10
3130
David Benjamin4969cc92016-04-22 15:02:23 -04003131 mulx 8*3($nptr),%rbx,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08003132 adcx %rbx,%r10
3133 adox %r12,%r11
3134
David Benjamin4969cc92016-04-22 15:02:23 -04003135 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003136 mov %rdx,%rax
3137 mov %r8,%rdx
3138 adcx %rbx,%r11
3139 adox %r13,%r12
3140
3141 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3142 mov %rax,%rdx
3143 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3144
David Benjamin4969cc92016-04-22 15:02:23 -04003145 mulx 8*5($nptr),%rax,%r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08003146 adcx %rax,%r12
3147 adox %r14,%r13
3148
David Benjamin4969cc92016-04-22 15:02:23 -04003149 mulx 8*6($nptr),%rax,%r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08003150 adcx %rax,%r13
3151 adox %r15,%r14
3152
David Benjamin4969cc92016-04-22 15:02:23 -04003153 mulx 8*7($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003154 mov %rbx,%rdx
3155 adcx %rax,%r14
3156 adox $carry,%r15 # $carry is 0
3157 adcx $carry,%r15 # cf=0
3158
3159 .byte 0x67,0x67,0x67
3160 inc %rcx # of=0
3161 jnz .Lsqrx8x_reduce
3162
3163 mov $carry,%rax # xor %rax,%rax
3164 cmp 0+8(%rsp),$nptr # end of n[]?
3165 jae .Lsqrx8x_no_tail
3166
3167 mov 48+8(%rsp),%rdx # pull n0*a[0]
3168 add 8*0($tptr),%r8
David Benjamin4969cc92016-04-22 15:02:23 -04003169 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003170 mov \$-8,%rcx
3171 adcx 8*1($tptr),%r9
3172 adcx 8*2($tptr),%r10
3173 adc 8*3($tptr),%r11
3174 adc 8*4($tptr),%r12
3175 adc 8*5($tptr),%r13
3176 adc 8*6($tptr),%r14
3177 adc 8*7($tptr),%r15
3178 lea 8*8($tptr),$tptr
3179 sbb %rax,%rax # top carry
3180
3181 xor $carry,$carry # of=0, cf=0
3182 mov %rax,16+8(%rsp)
3183 jmp .Lsqrx8x_tail
3184
3185.align 32
3186.Lsqrx8x_tail:
3187 mov %r8,%rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003188 mulx 8*0($nptr),%rax,%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08003189 adcx %rax,%rbx
3190 adox %r9,%r8
3191
David Benjamin4969cc92016-04-22 15:02:23 -04003192 mulx 8*1($nptr),%rax,%r9
Adam Langleyd9e397b2015-01-22 14:27:53 -08003193 adcx %rax,%r8
3194 adox %r10,%r9
3195
David Benjamin4969cc92016-04-22 15:02:23 -04003196 mulx 8*2($nptr),%rax,%r10
Adam Langleyd9e397b2015-01-22 14:27:53 -08003197 adcx %rax,%r9
3198 adox %r11,%r10
3199
David Benjamin4969cc92016-04-22 15:02:23 -04003200 mulx 8*3($nptr),%rax,%r11
Adam Langleyd9e397b2015-01-22 14:27:53 -08003201 adcx %rax,%r10
3202 adox %r12,%r11
3203
David Benjamin4969cc92016-04-22 15:02:23 -04003204 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003205 adcx %rax,%r11
3206 adox %r13,%r12
3207
David Benjamin4969cc92016-04-22 15:02:23 -04003208 mulx 8*5($nptr),%rax,%r13
Adam Langleyd9e397b2015-01-22 14:27:53 -08003209 adcx %rax,%r12
3210 adox %r14,%r13
3211
David Benjamin4969cc92016-04-22 15:02:23 -04003212 mulx 8*6($nptr),%rax,%r14
Adam Langleyd9e397b2015-01-22 14:27:53 -08003213 adcx %rax,%r13
3214 adox %r15,%r14
3215
David Benjamin4969cc92016-04-22 15:02:23 -04003216 mulx 8*7($nptr),%rax,%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003217 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3218 adcx %rax,%r14
3219 adox $carry,%r15
3220 mov %rbx,($tptr,%rcx,8) # save result
3221 mov %r8,%rbx
3222 adcx $carry,%r15 # cf=0
3223
3224 inc %rcx # of=0
3225 jnz .Lsqrx8x_tail
3226
3227 cmp 0+8(%rsp),$nptr # end of n[]?
3228 jae .Lsqrx8x_tail_done # break out of loop
3229
3230 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3231 mov 48+8(%rsp),%rdx # pull n0*a[0]
David Benjamin4969cc92016-04-22 15:02:23 -04003232 lea 8*8($nptr),$nptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003233 adc 8*0($tptr),%r8
3234 adc 8*1($tptr),%r9
3235 adc 8*2($tptr),%r10
3236 adc 8*3($tptr),%r11
3237 adc 8*4($tptr),%r12
3238 adc 8*5($tptr),%r13
3239 adc 8*6($tptr),%r14
3240 adc 8*7($tptr),%r15
3241 lea 8*8($tptr),$tptr
3242 sbb %rax,%rax
3243 sub \$8,%rcx # mov \$-8,%rcx
3244
3245 xor $carry,$carry # of=0, cf=0
3246 mov %rax,16+8(%rsp)
3247 jmp .Lsqrx8x_tail
3248
3249.align 32
3250.Lsqrx8x_tail_done:
3251 add 24+8(%rsp),%r8 # can this overflow?
Adam Langley4139edb2016-01-13 15:00:54 -08003252 adc \$0,%r9
3253 adc \$0,%r10
3254 adc \$0,%r11
3255 adc \$0,%r12
3256 adc \$0,%r13
3257 adc \$0,%r14
3258 adc \$0,%r15 # can't overflow, because we
3259 # started with "overhung" part
3260 # of multiplication
Adam Langleyd9e397b2015-01-22 14:27:53 -08003261 mov $carry,%rax # xor %rax,%rax
3262
3263 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3264.Lsqrx8x_no_tail: # %cf is 0 if jumped here
3265 adc 8*0($tptr),%r8
3266 movq %xmm3,%rcx
3267 adc 8*1($tptr),%r9
David Benjamin4969cc92016-04-22 15:02:23 -04003268 mov 8*7($nptr),$carry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003269 movq %xmm2,$nptr # restore $nptr
3270 adc 8*2($tptr),%r10
3271 adc 8*3($tptr),%r11
3272 adc 8*4($tptr),%r12
3273 adc 8*5($tptr),%r13
3274 adc 8*6($tptr),%r14
3275 adc 8*7($tptr),%r15
3276 adc %rax,%rax # top-most carry
3277
3278 mov 32+8(%rsp),%rbx # n0
3279 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3280
3281 mov %r8,8*0($tptr) # store top 512 bits
3282 lea 8*8($tptr),%r8 # borrow %r8
3283 mov %r9,8*1($tptr)
3284 mov %r10,8*2($tptr)
3285 mov %r11,8*3($tptr)
3286 mov %r12,8*4($tptr)
3287 mov %r13,8*5($tptr)
3288 mov %r14,8*6($tptr)
3289 mov %r15,8*7($tptr)
3290
3291 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3292 cmp 8+8(%rsp),%r8 # end of t[]?
3293 jb .Lsqrx8x_reduction_loop
David Benjamin4969cc92016-04-22 15:02:23 -04003294 ret
3295.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08003296___
3297}
3298##############################################################
3299# Post-condition, 4x unrolled
3300#
3301{
3302my ($rptr,$nptr)=("%rdx","%rbp");
Adam Langleyd9e397b2015-01-22 14:27:53 -08003303$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04003304.align 32
3305__bn_postx4x_internal:
3306 mov 8*0($nptr),%r12
Adam Langleyd9e397b2015-01-22 14:27:53 -08003307 mov %rcx,%r10 # -$num
Adam Langleyd9e397b2015-01-22 14:27:53 -08003308 mov %rcx,%r9 # -$num
David Benjamin4969cc92016-04-22 15:02:23 -04003309 neg %rax
3310 sar \$3+2,%rcx
Adam Langleyd9e397b2015-01-22 14:27:53 -08003311 #lea 48+8(%rsp,%r9),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003312 movq %xmm1,$rptr # restore $rptr
3313 movq %xmm1,$aptr # prepare for back-to-back call
David Benjamin4969cc92016-04-22 15:02:23 -04003314 dec %r12 # so that after 'not' we get -n[0]
3315 mov 8*1($nptr),%r13
3316 xor %r8,%r8
3317 mov 8*2($nptr),%r14
3318 mov 8*3($nptr),%r15
3319 jmp .Lsqrx4x_sub_entry
Adam Langleyd9e397b2015-01-22 14:27:53 -08003320
David Benjamin4969cc92016-04-22 15:02:23 -04003321.align 16
Adam Langleyd9e397b2015-01-22 14:27:53 -08003322.Lsqrx4x_sub:
David Benjamin4969cc92016-04-22 15:02:23 -04003323 mov 8*0($nptr),%r12
3324 mov 8*1($nptr),%r13
3325 mov 8*2($nptr),%r14
3326 mov 8*3($nptr),%r15
3327.Lsqrx4x_sub_entry:
3328 andn %rax,%r12,%r12
3329 lea 8*4($nptr),$nptr
3330 andn %rax,%r13,%r13
3331 andn %rax,%r14,%r14
3332 andn %rax,%r15,%r15
3333
3334 neg %r8 # mov %r8,%cf
3335 adc 8*0($tptr),%r12
3336 adc 8*1($tptr),%r13
3337 adc 8*2($tptr),%r14
3338 adc 8*3($tptr),%r15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003339 mov %r12,8*0($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003340 lea 8*4($tptr),$tptr
Adam Langleyd9e397b2015-01-22 14:27:53 -08003341 mov %r13,8*1($rptr)
David Benjamin4969cc92016-04-22 15:02:23 -04003342 sbb %r8,%r8 # mov %cf,%r8
Adam Langleyd9e397b2015-01-22 14:27:53 -08003343 mov %r14,8*2($rptr)
3344 mov %r15,8*3($rptr)
3345 lea 8*4($rptr),$rptr
3346
3347 inc %rcx
3348 jnz .Lsqrx4x_sub
David Benjamin4969cc92016-04-22 15:02:23 -04003349
Adam Langleyd9e397b2015-01-22 14:27:53 -08003350 neg %r9 # restore $num
3351
3352 ret
David Benjamin4969cc92016-04-22 15:02:23 -04003353.size __bn_postx4x_internal,.-__bn_postx4x_internal
Adam Langleyd9e397b2015-01-22 14:27:53 -08003354___
David Benjamin4969cc92016-04-22 15:02:23 -04003355}
Adam Langleyd9e397b2015-01-22 14:27:53 -08003356}}}
3357{
3358my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3359 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3360my $out=$inp;
3361my $STRIDE=2**5*8;
3362my $N=$STRIDE/4;
3363
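# (Illustrative note, not part of the original: bn_scatter5 below stores
# word i of power 'idx' at tbl[i*32 + idx], so every power contributes one
# word to each 32-word block; bn_gather5 then reads the same offset in
# every block, keeping the cache-line footprint independent of the index.)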
3364$code.=<<___;
3365.globl bn_scatter5
3366.type bn_scatter5,\@abi-omnipotent
3367.align 16
3368bn_scatter5:
3369 cmp \$0, $num
3370 jz .Lscatter_epilogue
3371 lea ($tbl,$idx,8),$tbl
3372.Lscatter:
3373 mov ($inp),%rax
3374 lea 8($inp),$inp
3375 mov %rax,($tbl)
3376 lea 32*8($tbl),$tbl
3377 sub \$1,$num
3378 jnz .Lscatter
3379.Lscatter_epilogue:
3380 ret
3381.size bn_scatter5,.-bn_scatter5
3382
3383.globl bn_gather5
3384.type bn_gather5,\@abi-omnipotent
David Benjamin4969cc92016-04-22 15:02:23 -04003385.align 32
Adam Langleyd9e397b2015-01-22 14:27:53 -08003386bn_gather5:
David Benjamin4969cc92016-04-22 15:02:23 -04003387.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
Adam Langleyd9e397b2015-01-22 14:27:53 -08003388 # I can't trust assembler to use specific encoding:-(
David Benjamin4969cc92016-04-22 15:02:23 -04003389 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3390 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3391 lea .Linc(%rip),%rax
3392 and \$-16,%rsp # shouldn't be formally required
3393
3394 movd $idx,%xmm5
3395 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3396 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3397 lea 128($tbl),%r11 # size optimization
3398 lea 128(%rsp),%rax # size optimization
3399
3400 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3401 movdqa %xmm1,%xmm4
3402 movdqa %xmm1,%xmm2
3403___
3404########################################################################
3405# calculate mask by comparing 0..31 to $idx and save result to stack
3406#
3407for($i=0;$i<$STRIDE/16;$i+=4) {
3408$code.=<<___;
3409 paddd %xmm0,%xmm1
3410 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3411___
3412$code.=<<___ if ($i);
3413 movdqa %xmm3,`16*($i-1)-128`(%rax)
Adam Langleyd9e397b2015-01-22 14:27:53 -08003414___
3415$code.=<<___;
David Benjamin4969cc92016-04-22 15:02:23 -04003416 movdqa %xmm4,%xmm3
Adam Langleyd9e397b2015-01-22 14:27:53 -08003417
David Benjamin4969cc92016-04-22 15:02:23 -04003418 paddd %xmm1,%xmm2
3419 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3420 movdqa %xmm0,`16*($i+0)-128`(%rax)
3421 movdqa %xmm4,%xmm0
3422
3423 paddd %xmm2,%xmm3
3424 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3425 movdqa %xmm1,`16*($i+1)-128`(%rax)
3426 movdqa %xmm4,%xmm1
3427
3428 paddd %xmm3,%xmm0
3429 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3430 movdqa %xmm2,`16*($i+2)-128`(%rax)
3431 movdqa %xmm4,%xmm2
3432___
3433}
3434$code.=<<___;
3435 movdqa %xmm3,`16*($i-1)-128`(%rax)
3436 jmp .Lgather
3437
3438.align 32
3439.Lgather:
3440 pxor %xmm4,%xmm4
3441 pxor %xmm5,%xmm5
3442___
3443for($i=0;$i<$STRIDE/16;$i+=4) {
3444$code.=<<___;
3445 movdqa `16*($i+0)-128`(%r11),%xmm0
3446 movdqa `16*($i+1)-128`(%r11),%xmm1
3447 movdqa `16*($i+2)-128`(%r11),%xmm2
3448 pand `16*($i+0)-128`(%rax),%xmm0
3449 movdqa `16*($i+3)-128`(%r11),%xmm3
3450 pand `16*($i+1)-128`(%rax),%xmm1
3451 por %xmm0,%xmm4
3452 pand `16*($i+2)-128`(%rax),%xmm2
3453 por %xmm1,%xmm5
3454 pand `16*($i+3)-128`(%rax),%xmm3
3455 por %xmm2,%xmm4
3456 por %xmm3,%xmm5
3457___
3458}
3459$code.=<<___;
3460 por %xmm5,%xmm4
3461 lea $STRIDE(%r11),%r11
3462 pshufd \$0x4e,%xmm4,%xmm0
3463 por %xmm4,%xmm0
Adam Langleyd9e397b2015-01-22 14:27:53 -08003464 movq %xmm0,($out) # m0=bp[0]
3465 lea 8($out),$out
3466 sub \$1,$num
3467 jnz .Lgather
David Benjamin4969cc92016-04-22 15:02:23 -04003468
3469 lea (%r10),%rsp
Adam Langleyd9e397b2015-01-22 14:27:53 -08003470 ret
3471.LSEH_end_bn_gather5:
3472.size bn_gather5,.-bn_gather5
3473___
3474}
3475$code.=<<___;
3476.align 64
David Benjamin4969cc92016-04-22 15:02:23 -04003477.Linc:
3478 .long 0,0, 1,1
3479 .long 2,2, 2,2
Adam Langleyd9e397b2015-01-22 14:27:53 -08003480.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3481___
3482
3483# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3484# CONTEXT *context,DISPATCHER_CONTEXT *disp)
3485if ($win64) {
3486$rec="%rcx";
3487$frame="%rdx";
3488$context="%r8";
3489$disp="%r9";
3490
3491$code.=<<___;
3492.extern __imp_RtlVirtualUnwind
3493.type mul_handler,\@abi-omnipotent
3494.align 16
3495mul_handler:
3496 push %rsi
3497 push %rdi
3498 push %rbx
3499 push %rbp
3500 push %r12
3501 push %r13
3502 push %r14
3503 push %r15
3504 pushfq
3505 sub \$64,%rsp
3506
3507 mov 120($context),%rax # pull context->Rax
3508 mov 248($context),%rbx # pull context->Rip
3509
3510 mov 8($disp),%rsi # disp->ImageBase
3511 mov 56($disp),%r11 # disp->HandlerData
3512
3513 mov 0(%r11),%r10d # HandlerData[0]
3514 lea (%rsi,%r10),%r10 # end of prologue label
3515 cmp %r10,%rbx # context->Rip<end of prologue label
3516 jb .Lcommon_seh_tail
3517
3518 mov 152($context),%rax # pull context->Rsp
3519
3520 mov 4(%r11),%r10d # HandlerData[1]
3521 lea (%rsi,%r10),%r10 # epilogue label
3522 cmp %r10,%rbx # context->Rip>=epilogue label
3523 jae .Lcommon_seh_tail
3524
3525 lea .Lmul_epilogue(%rip),%r10
3526 cmp %r10,%rbx
David Benjamin4969cc92016-04-22 15:02:23 -04003527 ja .Lbody_40
Adam Langleyd9e397b2015-01-22 14:27:53 -08003528
3529 mov 192($context),%r10 # pull $num
3530 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
David Benjamin4969cc92016-04-22 15:02:23 -04003531
Adam Langleyd9e397b2015-01-22 14:27:53 -08003532 jmp .Lbody_proceed
3533
3534.Lbody_40:
3535 mov 40(%rax),%rax # pull saved stack pointer
3536.Lbody_proceed:
Adam Langleyd9e397b2015-01-22 14:27:53 -08003537 mov -8(%rax),%rbx
3538 mov -16(%rax),%rbp
3539 mov -24(%rax),%r12
3540 mov -32(%rax),%r13
3541 mov -40(%rax),%r14
3542 mov -48(%rax),%r15
3543 mov %rbx,144($context) # restore context->Rbx
3544 mov %rbp,160($context) # restore context->Rbp
3545 mov %r12,216($context) # restore context->R12
3546 mov %r13,224($context) # restore context->R13
3547 mov %r14,232($context) # restore context->R14
3548 mov %r15,240($context) # restore context->R15
Adam Langleyd9e397b2015-01-22 14:27:53 -08003549
3550.Lcommon_seh_tail:
3551 mov 8(%rax),%rdi
3552 mov 16(%rax),%rsi
3553 mov %rax,152($context) # restore context->Rsp
3554 mov %rsi,168($context) # restore context->Rsi
3555 mov %rdi,176($context) # restore context->Rdi
3556
3557 mov 40($disp),%rdi # disp->ContextRecord
3558 mov $context,%rsi # context
3559 mov \$154,%ecx # sizeof(CONTEXT)
3560 .long 0xa548f3fc # cld; rep movsq
3561
3562 mov $disp,%rsi
3563 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3564 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3565 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3566 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3567 mov 40(%rsi),%r10 # disp->ContextRecord
3568 lea 56(%rsi),%r11 # &disp->HandlerData
3569 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3570 mov %r10,32(%rsp) # arg5
3571 mov %r11,40(%rsp) # arg6
3572 mov %r12,48(%rsp) # arg7
3573 mov %rcx,56(%rsp) # arg8, (NULL)
3574 call *__imp_RtlVirtualUnwind(%rip)
3575
3576 mov \$1,%eax # ExceptionContinueSearch
3577 add \$64,%rsp
3578 popfq
3579 pop %r15
3580 pop %r14
3581 pop %r13
3582 pop %r12
3583 pop %rbp
3584 pop %rbx
3585 pop %rdi
3586 pop %rsi
3587 ret
3588.size mul_handler,.-mul_handler
3589
3590.section .pdata
3591.align 4
3592 .rva .LSEH_begin_bn_mul_mont_gather5
3593 .rva .LSEH_end_bn_mul_mont_gather5
3594 .rva .LSEH_info_bn_mul_mont_gather5
3595
3596 .rva .LSEH_begin_bn_mul4x_mont_gather5
3597 .rva .LSEH_end_bn_mul4x_mont_gather5
3598 .rva .LSEH_info_bn_mul4x_mont_gather5
3599
3600 .rva .LSEH_begin_bn_power5
3601 .rva .LSEH_end_bn_power5
3602 .rva .LSEH_info_bn_power5
3603
3604 .rva .LSEH_begin_bn_from_mont8x
3605 .rva .LSEH_end_bn_from_mont8x
3606 .rva .LSEH_info_bn_from_mont8x
3607___
3608$code.=<<___ if ($addx);
3609 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3610 .rva .LSEH_end_bn_mulx4x_mont_gather5
3611 .rva .LSEH_info_bn_mulx4x_mont_gather5
3612
3613 .rva .LSEH_begin_bn_powerx5
3614 .rva .LSEH_end_bn_powerx5
3615 .rva .LSEH_info_bn_powerx5
3616___
3617$code.=<<___;
3618 .rva .LSEH_begin_bn_gather5
3619 .rva .LSEH_end_bn_gather5
3620 .rva .LSEH_info_bn_gather5
3621
3622.section .xdata
3623.align 8
3624.LSEH_info_bn_mul_mont_gather5:
3625 .byte 9,0,0,0
3626 .rva mul_handler
3627 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3628.align 8
3629.LSEH_info_bn_mul4x_mont_gather5:
3630 .byte 9,0,0,0
3631 .rva mul_handler
3632 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3633.align 8
3634.LSEH_info_bn_power5:
3635 .byte 9,0,0,0
3636 .rva mul_handler
3637 .rva .Lpower5_body,.Lpower5_epilogue # HandlerData[]
3638.align 8
3639.LSEH_info_bn_from_mont8x:
3640 .byte 9,0,0,0
3641 .rva mul_handler
3642 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3643___
3644$code.=<<___ if ($addx);
3645.align 8
3646.LSEH_info_bn_mulx4x_mont_gather5:
3647 .byte 9,0,0,0
3648 .rva mul_handler
3649 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3650.align 8
3651.LSEH_info_bn_powerx5:
3652 .byte 9,0,0,0
3653 .rva mul_handler
3654 .rva .Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3655___
3656$code.=<<___;
3657.align 8
3658.LSEH_info_bn_gather5:
David Benjamin4969cc92016-04-22 15:02:23 -04003659 .byte 0x01,0x0b,0x03,0x0a
3660 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3661 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
Adam Langleyd9e397b2015-01-22 14:27:53 -08003662.align 8
3663___
3664}
3665
3666$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3667
3668print $code;
3669close STDOUT;