#!/usr/bin/env perl

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# August 2011.
#
# Companion to x86_64-mont.pl that optimizes cache-timing attack
# countermeasures. The subroutines are produced by replacing bp[i]
# references in their x86_64-mont.pl counterparts with cache-neutral
# references to the powers table computed in BN_mod_exp_mont_consttime.
# In addition, a subroutine that scatters elements of the powers table
# is implemented, so that scattering/gathering can be tuned without
# modifying bn_exp.c.
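#
# Conceptually the gather side behaves like the (hypothetical) C sketch
# below: every candidate slot is read and all but the selected one are
# masked off, so at cache-line granularity the access pattern does not
# depend on the secret index. The code below gets the same effect with
# pand/por against the .Lmagic_masks constants rather than a literal loop:
#
#	BN_ULONG val = 0;
#	for (j = 0; j < 32; j++)		/* 2^5 = window size */
#		val |= table[j] & (0 - (BN_ULONG)(j == idx));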

# August 2013.
#
# Add MULX/AD*X code paths and additional interfaces to optimize for the
# branch prediction unit. For input lengths that are multiples of 8, the
# np argument is not just the modulus, but the modulus interleaved with 0.
# This is to optimize the post-condition...
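# (For those code paths n[i] is stored at byte offset 16*i of np, with a
# zero word after each limb; that is why the reduction code below walks
# the modulus with a 16-byte stride, e.g. "mov 16*1($np),%rax".)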

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
#
# TODO(davidben): Enable this after testing. $addx goes up to 1.
$addx = 0;

# int bn_mul_mont_gather5(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num,
		# int idx);	# 0 to 2^5-1, "index" in $bp holding
				# pre-computed powers of a', interleaved
				# in such a manner that b[0] is $bp[idx],
				# b[1] is $bp[2^5+idx], etc.
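				# e.g. with idx=3, b[j] is read from
				# $bp[2^5*j + 3]: the limbs of each power
				# are strided 2^5 words apart rather than
				# stored contiguously.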
$lo0="%r10";
$hi0="%r11";
$hi1="%r13";
$i="%r14";
$j="%r15";
$m0="%rbx";
$m1="%rbp";

67$code=<<___;
68.text
69
70.extern OPENSSL_ia32cap_P
71
72.globl bn_mul_mont_gather5
73.type bn_mul_mont_gather5,\@function,6
74.align 64
75bn_mul_mont_gather5:
76 test \$7,${num}d
77 jnz .Lmul_enter
78___
79$code.=<<___ if ($addx);
80 mov OPENSSL_ia32cap_P+8(%rip),%r11d
81___
82$code.=<<___;
83 jmp .Lmul4x_enter
84
85.align 16
86.Lmul_enter:
87 mov ${num}d,${num}d
88 mov %rsp,%rax
89 mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
90 push %rbx
91 push %rbp
92 push %r12
93 push %r13
94 push %r14
95 push %r15
96___
97$code.=<<___ if ($win64);
98 lea -0x28(%rsp),%rsp
99 movaps %xmm6,(%rsp)
100 movaps %xmm7,0x10(%rsp)
101___
102$code.=<<___;
103 lea 2($num),%r11
104 neg %r11
105 lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2))
106 and \$-1024,%rsp # minimize TLB usage
107
108 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
109.Lmul_body:
110 mov $bp,%r12 # reassign $bp
111___
112 $bp="%r12";
113 $STRIDE=2**5*8; # 5 is "window size"
114 $N=$STRIDE/4; # should match cache line size
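	# Concretely $STRIDE is 256 bytes (32 slots of 8 bytes) and $N is 64,
	# i.e. one cache line, so the four masked loads below always touch the
	# same four cache lines of a stride regardless of the selected index;
	# the .Lmagic_masks constants pick out the one that matters.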
115$code.=<<___;
116 mov %r10,%r11
117 shr \$`log($N/8)/log(2)`,%r10
118 and \$`$N/8-1`,%r11
119 not %r10
120 lea .Lmagic_masks(%rip),%rax
121 and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
122 lea 96($bp,%r11,8),$bp # pointer within 1st cache line
123 movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
124 movq 8(%rax,%r10,8),%xmm5 # cache line contains element
125 movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
126 movq 24(%rax,%r10,8),%xmm7
127
128 movq `0*$STRIDE/4-96`($bp),%xmm0
129 movq `1*$STRIDE/4-96`($bp),%xmm1
130 pand %xmm4,%xmm0
131 movq `2*$STRIDE/4-96`($bp),%xmm2
132 pand %xmm5,%xmm1
133 movq `3*$STRIDE/4-96`($bp),%xmm3
134 pand %xmm6,%xmm2
135 por %xmm1,%xmm0
136 pand %xmm7,%xmm3
137 por %xmm2,%xmm0
138 lea $STRIDE($bp),$bp
139 por %xmm3,%xmm0
140
141 movq %xmm0,$m0 # m0=bp[0]
142
143 mov ($n0),$n0 # pull n0[0] value
144 mov ($ap),%rax
145
146 xor $i,$i # i=0
147 xor $j,$j # j=0
148
149 movq `0*$STRIDE/4-96`($bp),%xmm0
150 movq `1*$STRIDE/4-96`($bp),%xmm1
151 pand %xmm4,%xmm0
152 movq `2*$STRIDE/4-96`($bp),%xmm2
153 pand %xmm5,%xmm1
154
155 mov $n0,$m1
156 mulq $m0 # ap[0]*bp[0]
157 mov %rax,$lo0
158 mov ($np),%rax
159
160 movq `3*$STRIDE/4-96`($bp),%xmm3
161 pand %xmm6,%xmm2
162 por %xmm1,%xmm0
163 pand %xmm7,%xmm3
164
165 imulq $lo0,$m1 # "tp[0]"*n0
166 mov %rdx,$hi0
167
168 por %xmm2,%xmm0
169 lea $STRIDE($bp),$bp
170 por %xmm3,%xmm0
171
172 mulq $m1 # np[0]*m1
173 add %rax,$lo0 # discarded
174 mov 8($ap),%rax
175 adc \$0,%rdx
176 mov %rdx,$hi1
177
178 lea 1($j),$j # j++
179 jmp .L1st_enter
180
181.align 16
182.L1st:
183 add %rax,$hi1
184 mov ($ap,$j,8),%rax
185 adc \$0,%rdx
186 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
187 mov $lo0,$hi0
188 adc \$0,%rdx
189 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
190 mov %rdx,$hi1
191
192.L1st_enter:
193 mulq $m0 # ap[j]*bp[0]
194 add %rax,$hi0
195 mov ($np,$j,8),%rax
196 adc \$0,%rdx
197 lea 1($j),$j # j++
198 mov %rdx,$lo0
199
200 mulq $m1 # np[j]*m1
201 cmp $num,$j
202 jne .L1st
203
204 movq %xmm0,$m0 # bp[1]
205
206 add %rax,$hi1
207 mov ($ap),%rax # ap[0]
208 adc \$0,%rdx
209 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
210 adc \$0,%rdx
211 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
212 mov %rdx,$hi1
213 mov $lo0,$hi0
214
215 xor %rdx,%rdx
216 add $hi0,$hi1
217 adc \$0,%rdx
218 mov $hi1,-8(%rsp,$num,8)
219 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
220
221 lea 1($i),$i # i++
222 jmp .Louter
223.align 16
224.Louter:
225 xor $j,$j # j=0
226 mov $n0,$m1
227 mov (%rsp),$lo0
228
229 movq `0*$STRIDE/4-96`($bp),%xmm0
230 movq `1*$STRIDE/4-96`($bp),%xmm1
231 pand %xmm4,%xmm0
232 movq `2*$STRIDE/4-96`($bp),%xmm2
233 pand %xmm5,%xmm1
234
235 mulq $m0 # ap[0]*bp[i]
236 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
237 mov ($np),%rax
238 adc \$0,%rdx
239
240 movq `3*$STRIDE/4-96`($bp),%xmm3
241 pand %xmm6,%xmm2
242 por %xmm1,%xmm0
243 pand %xmm7,%xmm3
244
245 imulq $lo0,$m1 # tp[0]*n0
246 mov %rdx,$hi0
247
248 por %xmm2,%xmm0
249 lea $STRIDE($bp),$bp
250 por %xmm3,%xmm0
251
252 mulq $m1 # np[0]*m1
253 add %rax,$lo0 # discarded
254 mov 8($ap),%rax
255 adc \$0,%rdx
256 mov 8(%rsp),$lo0 # tp[1]
257 mov %rdx,$hi1
258
259 lea 1($j),$j # j++
260 jmp .Linner_enter
261
262.align 16
263.Linner:
264 add %rax,$hi1
265 mov ($ap,$j,8),%rax
266 adc \$0,%rdx
267 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
268 mov (%rsp,$j,8),$lo0
269 adc \$0,%rdx
270 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
271 mov %rdx,$hi1
272
273.Linner_enter:
274 mulq $m0 # ap[j]*bp[i]
275 add %rax,$hi0
276 mov ($np,$j,8),%rax
277 adc \$0,%rdx
278 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
279 mov %rdx,$hi0
280 adc \$0,$hi0
281 lea 1($j),$j # j++
282
283 mulq $m1 # np[j]*m1
284 cmp $num,$j
285 jne .Linner
286
287 movq %xmm0,$m0 # bp[i+1]
288
289 add %rax,$hi1
290 mov ($ap),%rax # ap[0]
291 adc \$0,%rdx
292 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
293 mov (%rsp,$j,8),$lo0
294 adc \$0,%rdx
295 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
296 mov %rdx,$hi1
297
298 xor %rdx,%rdx
299 add $hi0,$hi1
300 adc \$0,%rdx
301 add $lo0,$hi1 # pull upmost overflow bit
302 adc \$0,%rdx
303 mov $hi1,-8(%rsp,$num,8)
304 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
305
306 lea 1($i),$i # i++
307 cmp $num,$i
308 jb .Louter
309
310 xor $i,$i # i=0 and clear CF!
311 mov (%rsp),%rax # tp[0]
312 lea (%rsp),$ap # borrow ap for tp
313 mov $num,$j # j=num
314 jmp .Lsub
315.align 16
316.Lsub: sbb ($np,$i,8),%rax
317 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
318 mov 8($ap,$i,8),%rax # tp[i+1]
319 lea 1($i),$i # i++
 320	dec	$j		# doesn't affect CF!
321 jnz .Lsub
322
323 sbb \$0,%rax # handle upmost overflow bit
324 xor $i,$i
325 mov $num,$j # j=num
326.align 16
327.Lcopy: # copy or in-place refresh
328 mov (%rsp,$i,8),$ap
329 mov ($rp,$i,8),$np
330 xor $np,$ap # conditional select:
331 and %rax,$ap # ((ap ^ np) & %rax) ^ np
332 xor $np,$ap # ap = borrow?tp:rp
333 mov $i,(%rsp,$i,8) # zap temporary vector
334 mov $ap,($rp,$i,8) # rp[i]=tp[i]
335 lea 1($i),$i
336 sub \$1,$j
337 jnz .Lcopy
338
339 mov 8(%rsp,$num,8),%rsi # restore %rsp
340 mov \$1,%rax
341___
342$code.=<<___ if ($win64);
343 movaps -88(%rsi),%xmm6
344 movaps -72(%rsi),%xmm7
345___
346$code.=<<___;
347 mov -48(%rsi),%r15
348 mov -40(%rsi),%r14
349 mov -32(%rsi),%r13
350 mov -24(%rsi),%r12
351 mov -16(%rsi),%rbp
352 mov -8(%rsi),%rbx
353 lea (%rsi),%rsp
354.Lmul_epilogue:
355 ret
356.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
357___
358{{{
359my @A=("%r10","%r11");
360my @N=("%r13","%rdi");
361$code.=<<___;
362.type bn_mul4x_mont_gather5,\@function,6
363.align 32
364bn_mul4x_mont_gather5:
365.Lmul4x_enter:
366___
367$code.=<<___ if ($addx);
368 and \$0x80100,%r11d
369 cmp \$0x80100,%r11d
370 je .Lmulx4x_enter
371___
372$code.=<<___;
373 .byte 0x67
374 mov %rsp,%rax
375 push %rbx
376 push %rbp
377 push %r12
378 push %r13
379 push %r14
380 push %r15
381___
382$code.=<<___ if ($win64);
383 lea -0x28(%rsp),%rsp
384 movaps %xmm6,(%rsp)
385 movaps %xmm7,0x10(%rsp)
386___
387$code.=<<___;
388 .byte 0x67
389 mov ${num}d,%r10d
390 shl \$3,${num}d
391 shl \$3+2,%r10d # 4*$num
392 neg $num # -$num
393
394 ##############################################################
395 # ensure that stack frame doesn't alias with $aptr+4*$num
396 # modulo 4096, which covers ret[num], am[num] and n[2*num]
397 # (see bn_exp.c). this is done to allow memory disambiguation
398 # logic do its magic. [excessive frame is allocated in order
399 # to allow bn_from_mont8x to clear it.]
400 #
401 lea -64(%rsp,$num,2),%r11
402 sub $ap,%r11
403 and \$4095,%r11
404 cmp %r11,%r10
405 jb .Lmul4xsp_alt
406 sub %r11,%rsp # align with $ap
407 lea -64(%rsp,$num,2),%rsp # alloca(128+num*8)
408 jmp .Lmul4xsp_done
409
410.align 32
411.Lmul4xsp_alt:
412 lea 4096-64(,$num,2),%r10
413 lea -64(%rsp,$num,2),%rsp # alloca(128+num*8)
414 sub %r10,%r11
415 mov \$0,%r10
416 cmovc %r10,%r11
417 sub %r11,%rsp
418.Lmul4xsp_done:
419 and \$-64,%rsp
420 neg $num
421
422 mov %rax,40(%rsp)
423.Lmul4x_body:
424
425 call mul4x_internal
426
427 mov 40(%rsp),%rsi # restore %rsp
428 mov \$1,%rax
429___
430$code.=<<___ if ($win64);
431 movaps -88(%rsi),%xmm6
432 movaps -72(%rsi),%xmm7
433___
434$code.=<<___;
435 mov -48(%rsi),%r15
436 mov -40(%rsi),%r14
437 mov -32(%rsi),%r13
438 mov -24(%rsi),%r12
439 mov -16(%rsi),%rbp
440 mov -8(%rsi),%rbx
441 lea (%rsi),%rsp
442.Lmul4x_epilogue:
443 ret
444.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
445
446.type mul4x_internal,\@abi-omnipotent
447.align 32
448mul4x_internal:
449 shl \$5,$num
450 mov `($win64?56:8)`(%rax),%r10d # load 7th argument
451 lea 256(%rdx,$num),%r13
452 shr \$5,$num # restore $num
453___
454 $bp="%r12";
455 $STRIDE=2**5*8; # 5 is "window size"
456 $N=$STRIDE/4; # should match cache line size
457 $tp=$i;
458$code.=<<___;
459 mov %r10,%r11
460 shr \$`log($N/8)/log(2)`,%r10
461 and \$`$N/8-1`,%r11
462 not %r10
463 lea .Lmagic_masks(%rip),%rax
464 and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
465 lea 96(%rdx,%r11,8),$bp # pointer within 1st cache line
466 movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
467 movq 8(%rax,%r10,8),%xmm5 # cache line contains element
468 add \$7,%r11
469 movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
470 movq 24(%rax,%r10,8),%xmm7
471 and \$7,%r11
472
473 movq `0*$STRIDE/4-96`($bp),%xmm0
474 lea $STRIDE($bp),$tp # borrow $tp
475 movq `1*$STRIDE/4-96`($bp),%xmm1
476 pand %xmm4,%xmm0
477 movq `2*$STRIDE/4-96`($bp),%xmm2
478 pand %xmm5,%xmm1
479 movq `3*$STRIDE/4-96`($bp),%xmm3
480 pand %xmm6,%xmm2
481 .byte 0x67
482 por %xmm1,%xmm0
483 movq `0*$STRIDE/4-96`($tp),%xmm1
484 .byte 0x67
485 pand %xmm7,%xmm3
486 .byte 0x67
487 por %xmm2,%xmm0
488 movq `1*$STRIDE/4-96`($tp),%xmm2
489 .byte 0x67
490 pand %xmm4,%xmm1
491 .byte 0x67
492 por %xmm3,%xmm0
493 movq `2*$STRIDE/4-96`($tp),%xmm3
494
495 movq %xmm0,$m0 # m0=bp[0]
496 movq `3*$STRIDE/4-96`($tp),%xmm0
497 mov %r13,16+8(%rsp) # save end of b[num]
498 mov $rp, 56+8(%rsp) # save $rp
499
500 mov ($n0),$n0 # pull n0[0] value
501 mov ($ap),%rax
502 lea ($ap,$num),$ap # end of a[num]
503 neg $num
504
505 mov $n0,$m1
506 mulq $m0 # ap[0]*bp[0]
507 mov %rax,$A[0]
508 mov ($np),%rax
509
510 pand %xmm5,%xmm2
511 pand %xmm6,%xmm3
512 por %xmm2,%xmm1
513
514 imulq $A[0],$m1 # "tp[0]"*n0
515 ##############################################################
516 # $tp is chosen so that writing to top-most element of the
517 # vector occurs just "above" references to powers table,
518 # "above" modulo cache-line size, which effectively precludes
519 # possibility of memory disambiguation logic failure when
520 # accessing the table.
521 #
522 lea 64+8(%rsp,%r11,8),$tp
523 mov %rdx,$A[1]
524
525 pand %xmm7,%xmm0
526 por %xmm3,%xmm1
527 lea 2*$STRIDE($bp),$bp
528 por %xmm1,%xmm0
529
530 mulq $m1 # np[0]*m1
531 add %rax,$A[0] # discarded
532 mov 8($ap,$num),%rax
533 adc \$0,%rdx
534 mov %rdx,$N[1]
535
536 mulq $m0
537 add %rax,$A[1]
538 mov 16*1($np),%rax # interleaved with 0, therefore 16*n
539 adc \$0,%rdx
540 mov %rdx,$A[0]
541
542 mulq $m1
543 add %rax,$N[1]
544 mov 16($ap,$num),%rax
545 adc \$0,%rdx
546 add $A[1],$N[1]
547 lea 4*8($num),$j # j=4
548 lea 16*4($np),$np
549 adc \$0,%rdx
550 mov $N[1],($tp)
551 mov %rdx,$N[0]
552 jmp .L1st4x
553
554.align 32
555.L1st4x:
556 mulq $m0 # ap[j]*bp[0]
557 add %rax,$A[0]
558 mov -16*2($np),%rax
559 lea 32($tp),$tp
560 adc \$0,%rdx
561 mov %rdx,$A[1]
562
563 mulq $m1 # np[j]*m1
564 add %rax,$N[0]
565 mov -8($ap,$j),%rax
566 adc \$0,%rdx
567 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
568 adc \$0,%rdx
569 mov $N[0],-24($tp) # tp[j-1]
570 mov %rdx,$N[1]
571
572 mulq $m0 # ap[j]*bp[0]
573 add %rax,$A[1]
574 mov -16*1($np),%rax
575 adc \$0,%rdx
576 mov %rdx,$A[0]
577
578 mulq $m1 # np[j]*m1
579 add %rax,$N[1]
580 mov ($ap,$j),%rax
581 adc \$0,%rdx
582 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
583 adc \$0,%rdx
584 mov $N[1],-16($tp) # tp[j-1]
585 mov %rdx,$N[0]
586
587 mulq $m0 # ap[j]*bp[0]
588 add %rax,$A[0]
589 mov 16*0($np),%rax
590 adc \$0,%rdx
591 mov %rdx,$A[1]
592
593 mulq $m1 # np[j]*m1
594 add %rax,$N[0]
595 mov 8($ap,$j),%rax
596 adc \$0,%rdx
597 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
598 adc \$0,%rdx
599 mov $N[0],-8($tp) # tp[j-1]
600 mov %rdx,$N[1]
601
602 mulq $m0 # ap[j]*bp[0]
603 add %rax,$A[1]
604 mov 16*1($np),%rax
605 adc \$0,%rdx
606 mov %rdx,$A[0]
607
608 mulq $m1 # np[j]*m1
609 add %rax,$N[1]
610 mov 16($ap,$j),%rax
611 adc \$0,%rdx
612 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
613 lea 16*4($np),$np
614 adc \$0,%rdx
615 mov $N[1],($tp) # tp[j-1]
616 mov %rdx,$N[0]
617
618 add \$32,$j # j+=4
619 jnz .L1st4x
620
621 mulq $m0 # ap[j]*bp[0]
622 add %rax,$A[0]
623 mov -16*2($np),%rax
624 lea 32($tp),$tp
625 adc \$0,%rdx
626 mov %rdx,$A[1]
627
628 mulq $m1 # np[j]*m1
629 add %rax,$N[0]
630 mov -8($ap),%rax
631 adc \$0,%rdx
632 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
633 adc \$0,%rdx
634 mov $N[0],-24($tp) # tp[j-1]
635 mov %rdx,$N[1]
636
637 mulq $m0 # ap[j]*bp[0]
638 add %rax,$A[1]
639 mov -16*1($np),%rax
640 adc \$0,%rdx
641 mov %rdx,$A[0]
642
643 mulq $m1 # np[j]*m1
644 add %rax,$N[1]
645 mov ($ap,$num),%rax # ap[0]
646 adc \$0,%rdx
647 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
648 adc \$0,%rdx
649 mov $N[1],-16($tp) # tp[j-1]
650 mov %rdx,$N[0]
651
652 movq %xmm0,$m0 # bp[1]
653 lea ($np,$num,2),$np # rewind $np
654
655 xor $N[1],$N[1]
656 add $A[0],$N[0]
657 adc \$0,$N[1]
658 mov $N[0],-8($tp)
659
660 jmp .Louter4x
661
662.align 32
663.Louter4x:
664 mov ($tp,$num),$A[0]
665 mov $n0,$m1
666 mulq $m0 # ap[0]*bp[i]
667 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
668 mov ($np),%rax
669 adc \$0,%rdx
670
671 movq `0*$STRIDE/4-96`($bp),%xmm0
672 movq `1*$STRIDE/4-96`($bp),%xmm1
673 pand %xmm4,%xmm0
674 movq `2*$STRIDE/4-96`($bp),%xmm2
675 pand %xmm5,%xmm1
676 movq `3*$STRIDE/4-96`($bp),%xmm3
677
678 imulq $A[0],$m1 # tp[0]*n0
679 .byte 0x67
680 mov %rdx,$A[1]
681 mov $N[1],($tp) # store upmost overflow bit
682
683 pand %xmm6,%xmm2
684 por %xmm1,%xmm0
685 pand %xmm7,%xmm3
686 por %xmm2,%xmm0
687 lea ($tp,$num),$tp # rewind $tp
688 lea $STRIDE($bp),$bp
689 por %xmm3,%xmm0
690
691 mulq $m1 # np[0]*m1
692 add %rax,$A[0] # "$N[0]", discarded
693 mov 8($ap,$num),%rax
694 adc \$0,%rdx
695 mov %rdx,$N[1]
696
697 mulq $m0 # ap[j]*bp[i]
698 add %rax,$A[1]
699 mov 16*1($np),%rax # interleaved with 0, therefore 16*n
700 adc \$0,%rdx
701 add 8($tp),$A[1] # +tp[1]
702 adc \$0,%rdx
703 mov %rdx,$A[0]
704
705 mulq $m1 # np[j]*m1
706 add %rax,$N[1]
707 mov 16($ap,$num),%rax
708 adc \$0,%rdx
709 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
710 lea 4*8($num),$j # j=4
711 lea 16*4($np),$np
712 adc \$0,%rdx
713 mov %rdx,$N[0]
714 jmp .Linner4x
715
716.align 32
717.Linner4x:
718 mulq $m0 # ap[j]*bp[i]
719 add %rax,$A[0]
720 mov -16*2($np),%rax
721 adc \$0,%rdx
722 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
723 lea 32($tp),$tp
724 adc \$0,%rdx
725 mov %rdx,$A[1]
726
727 mulq $m1 # np[j]*m1
728 add %rax,$N[0]
729 mov -8($ap,$j),%rax
730 adc \$0,%rdx
731 add $A[0],$N[0]
732 adc \$0,%rdx
733 mov $N[1],-32($tp) # tp[j-1]
734 mov %rdx,$N[1]
735
736 mulq $m0 # ap[j]*bp[i]
737 add %rax,$A[1]
738 mov -16*1($np),%rax
739 adc \$0,%rdx
740 add -8($tp),$A[1]
741 adc \$0,%rdx
742 mov %rdx,$A[0]
743
744 mulq $m1 # np[j]*m1
745 add %rax,$N[1]
746 mov ($ap,$j),%rax
747 adc \$0,%rdx
748 add $A[1],$N[1]
749 adc \$0,%rdx
750 mov $N[0],-24($tp) # tp[j-1]
751 mov %rdx,$N[0]
752
753 mulq $m0 # ap[j]*bp[i]
754 add %rax,$A[0]
755 mov 16*0($np),%rax
756 adc \$0,%rdx
757 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
758 adc \$0,%rdx
759 mov %rdx,$A[1]
760
761 mulq $m1 # np[j]*m1
762 add %rax,$N[0]
763 mov 8($ap,$j),%rax
764 adc \$0,%rdx
765 add $A[0],$N[0]
766 adc \$0,%rdx
767 mov $N[1],-16($tp) # tp[j-1]
768 mov %rdx,$N[1]
769
770 mulq $m0 # ap[j]*bp[i]
771 add %rax,$A[1]
772 mov 16*1($np),%rax
773 adc \$0,%rdx
774 add 8($tp),$A[1]
775 adc \$0,%rdx
776 mov %rdx,$A[0]
777
778 mulq $m1 # np[j]*m1
779 add %rax,$N[1]
780 mov 16($ap,$j),%rax
781 adc \$0,%rdx
782 add $A[1],$N[1]
783 lea 16*4($np),$np
784 adc \$0,%rdx
785 mov $N[0],-8($tp) # tp[j-1]
786 mov %rdx,$N[0]
787
788 add \$32,$j # j+=4
789 jnz .Linner4x
790
791 mulq $m0 # ap[j]*bp[i]
792 add %rax,$A[0]
793 mov -16*2($np),%rax
794 adc \$0,%rdx
795 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
796 lea 32($tp),$tp
797 adc \$0,%rdx
798 mov %rdx,$A[1]
799
800 mulq $m1 # np[j]*m1
801 add %rax,$N[0]
802 mov -8($ap),%rax
803 adc \$0,%rdx
804 add $A[0],$N[0]
805 adc \$0,%rdx
806 mov $N[1],-32($tp) # tp[j-1]
807 mov %rdx,$N[1]
808
809 mulq $m0 # ap[j]*bp[i]
810 add %rax,$A[1]
811 mov $m1,%rax
812 mov -16*1($np),$m1
813 adc \$0,%rdx
814 add -8($tp),$A[1]
815 adc \$0,%rdx
816 mov %rdx,$A[0]
817
818 mulq $m1 # np[j]*m1
819 add %rax,$N[1]
820 mov ($ap,$num),%rax # ap[0]
821 adc \$0,%rdx
822 add $A[1],$N[1]
823 adc \$0,%rdx
824 mov $N[0],-24($tp) # tp[j-1]
825 mov %rdx,$N[0]
826
827 movq %xmm0,$m0 # bp[i+1]
828 mov $N[1],-16($tp) # tp[j-1]
829 lea ($np,$num,2),$np # rewind $np
830
831 xor $N[1],$N[1]
832 add $A[0],$N[0]
833 adc \$0,$N[1]
834 add ($tp),$N[0] # pull upmost overflow bit
835 adc \$0,$N[1] # upmost overflow bit
836 mov $N[0],-8($tp)
837
838 cmp 16+8(%rsp),$bp
839 jb .Louter4x
840___
841if (1) {
842$code.=<<___;
843 sub $N[0],$m1 # compare top-most words
844 adc $j,$j # $j is zero
845 or $j,$N[1]
846 xor \$1,$N[1]
847 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
848 lea ($np,$N[1],8),%rbp # nptr in .sqr4x_sub
849 mov %r9,%rcx
850 sar \$3+2,%rcx # cf=0
851 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
852 jmp .Lsqr4x_sub
853___
854} else {
855my @ri=("%rax",$bp,$m0,$m1);
856my $rp="%rdx";
857$code.=<<___
858 xor \$1,$N[1]
859 lea ($tp,$num),$tp # rewind $tp
860 sar \$5,$num # cf=0
861 lea ($np,$N[1],8),$np
862 mov 56+8(%rsp),$rp # restore $rp
863 jmp .Lsub4x
864
865.align 32
866.Lsub4x:
867 .byte 0x66
868 mov 8*0($tp),@ri[0]
869 mov 8*1($tp),@ri[1]
870 .byte 0x66
871 sbb 16*0($np),@ri[0]
872 mov 8*2($tp),@ri[2]
873 sbb 16*1($np),@ri[1]
874 mov 3*8($tp),@ri[3]
875 lea 4*8($tp),$tp
876 sbb 16*2($np),@ri[2]
877 mov @ri[0],8*0($rp)
878 sbb 16*3($np),@ri[3]
879 lea 16*4($np),$np
880 mov @ri[1],8*1($rp)
881 mov @ri[2],8*2($rp)
882 mov @ri[3],8*3($rp)
883 lea 8*4($rp),$rp
884
885 inc $num
886 jnz .Lsub4x
887
888 ret
889___
890}
891$code.=<<___;
892.size mul4x_internal,.-mul4x_internal
893___
894}}}
895 {{{
896######################################################################
897# void bn_power5(
898my $rptr="%rdi"; # BN_ULONG *rptr,
899my $aptr="%rsi"; # const BN_ULONG *aptr,
900my $bptr="%rdx"; # const void *table,
901my $nptr="%rcx"; # const BN_ULONG *nptr,
902my $n0 ="%r8"; # const BN_ULONG *n0);
903my $num ="%r9"; # int num, has to be divisible by 8
904 # int pwr
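#
# One 5-bit window step of the constant-time exponentiation: five
# back-to-back Montgomery squarings of aptr (the five __bn_sqr8x_internal
# calls below) followed by one gather-multiply with the table entry
# selected by pwr (the mul4x_internal call).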
905
906my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
907my @A0=("%r10","%r11");
908my @A1=("%r12","%r13");
909my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
910
911$code.=<<___;
912.globl bn_power5
913.type bn_power5,\@function,6
914.align 32
915bn_power5:
916___
917$code.=<<___ if ($addx);
918 mov OPENSSL_ia32cap_P+8(%rip),%r11d
919 and \$0x80100,%r11d
920 cmp \$0x80100,%r11d
921 je .Lpowerx5_enter
922___
923$code.=<<___;
924 mov %rsp,%rax
925 push %rbx
926 push %rbp
927 push %r12
928 push %r13
929 push %r14
930 push %r15
931___
932$code.=<<___ if ($win64);
933 lea -0x28(%rsp),%rsp
934 movaps %xmm6,(%rsp)
935 movaps %xmm7,0x10(%rsp)
936___
937$code.=<<___;
938 mov ${num}d,%r10d
939 shl \$3,${num}d # convert $num to bytes
940 shl \$3+2,%r10d # 4*$num
941 neg $num
942 mov ($n0),$n0 # *n0
943
944 ##############################################################
945 # ensure that stack frame doesn't alias with $aptr+4*$num
946 # modulo 4096, which covers ret[num], am[num] and n[2*num]
947 # (see bn_exp.c). this is done to allow memory disambiguation
948 # logic do its magic.
949 #
950 lea -64(%rsp,$num,2),%r11
951 sub $aptr,%r11
952 and \$4095,%r11
953 cmp %r11,%r10
954 jb .Lpwr_sp_alt
955 sub %r11,%rsp # align with $aptr
956 lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
957 jmp .Lpwr_sp_done
958
959.align 32
960.Lpwr_sp_alt:
961 lea 4096-64(,$num,2),%r10 # 4096-frame-2*$num
962 lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
963 sub %r10,%r11
964 mov \$0,%r10
965 cmovc %r10,%r11
966 sub %r11,%rsp
967.Lpwr_sp_done:
968 and \$-64,%rsp
969 mov $num,%r10
970 neg $num
971
972 ##############################################################
973 # Stack layout
974 #
975 # +0 saved $num, used in reduction section
976 # +8 &t[2*$num], used in reduction section
977 # +32 saved *n0
978 # +40 saved %rsp
979 # +48 t[2*$num]
980 #
981 mov $n0, 32(%rsp)
982 mov %rax, 40(%rsp) # save original %rsp
983.Lpower5_body:
984 movq $rptr,%xmm1 # save $rptr
985 movq $nptr,%xmm2 # save $nptr
986 movq %r10, %xmm3 # -$num
987 movq $bptr,%xmm4
988
989 call __bn_sqr8x_internal
990 call __bn_sqr8x_internal
991 call __bn_sqr8x_internal
992 call __bn_sqr8x_internal
993 call __bn_sqr8x_internal
994
995 movq %xmm2,$nptr
996 movq %xmm4,$bptr
997 mov $aptr,$rptr
998 mov 40(%rsp),%rax
999 lea 32(%rsp),$n0
1000
1001 call mul4x_internal
1002
1003 mov 40(%rsp),%rsi # restore %rsp
1004 mov \$1,%rax
1005 mov -48(%rsi),%r15
1006 mov -40(%rsi),%r14
1007 mov -32(%rsi),%r13
1008 mov -24(%rsi),%r12
1009 mov -16(%rsi),%rbp
1010 mov -8(%rsi),%rbx
1011 lea (%rsi),%rsp
1012.Lpower5_epilogue:
1013 ret
1014.size bn_power5,.-bn_power5
1015
1016.globl bn_sqr8x_internal
1017.hidden bn_sqr8x_internal
1018.type bn_sqr8x_internal,\@abi-omnipotent
1019.align 32
1020bn_sqr8x_internal:
1021__bn_sqr8x_internal:
1022 ##############################################################
1023 # Squaring part:
1024 #
1025 # a) multiply-n-add everything but a[i]*a[i];
1026 # b) shift result of a) by 1 to the left and accumulate
1027 # a[i]*a[i] products;
1028 #
1029 ##############################################################
1030 # a[1]a[0]
1031 # a[2]a[0]
1032 # a[3]a[0]
1033 # a[2]a[1]
1034 # a[4]a[0]
1035 # a[3]a[1]
1036 # a[5]a[0]
1037 # a[4]a[1]
1038 # a[3]a[2]
1039 # a[6]a[0]
1040 # a[5]a[1]
1041 # a[4]a[2]
1042 # a[7]a[0]
1043 # a[6]a[1]
1044 # a[5]a[2]
1045 # a[4]a[3]
1046 # a[7]a[1]
1047 # a[6]a[2]
1048 # a[5]a[3]
1049 # a[7]a[2]
1050 # a[6]a[3]
1051 # a[5]a[4]
1052 # a[7]a[3]
1053 # a[6]a[4]
1054 # a[7]a[4]
1055 # a[6]a[5]
1056 # a[7]a[5]
1057 # a[7]a[6]
1058 # a[1]a[0]
1059 # a[2]a[0]
1060 # a[3]a[0]
1061 # a[4]a[0]
1062 # a[5]a[0]
1063 # a[6]a[0]
1064 # a[7]a[0]
1065 # a[2]a[1]
1066 # a[3]a[1]
1067 # a[4]a[1]
1068 # a[5]a[1]
1069 # a[6]a[1]
1070 # a[7]a[1]
1071 # a[3]a[2]
1072 # a[4]a[2]
1073 # a[5]a[2]
1074 # a[6]a[2]
1075 # a[7]a[2]
1076 # a[4]a[3]
1077 # a[5]a[3]
1078 # a[6]a[3]
1079 # a[7]a[3]
1080 # a[5]a[4]
1081 # a[6]a[4]
1082 # a[7]a[4]
1083 # a[6]a[5]
1084 # a[7]a[5]
1085 # a[7]a[6]
1086 # a[0]a[0]
1087 # a[1]a[1]
1088 # a[2]a[2]
1089 # a[3]a[3]
1090 # a[4]a[4]
1091 # a[5]a[5]
1092 # a[6]a[6]
1093 # a[7]a[7]
1094
1095 lea 32(%r10),$i # $i=-($num-32)
1096 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1097
1098 mov $num,$j # $j=$num
1099
1100 # comments apply to $num==8 case
1101 mov -32($aptr,$i),$a0 # a[0]
1102 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1103 mov -24($aptr,$i),%rax # a[1]
1104 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1105 mov -16($aptr,$i),$ai # a[2]
1106 mov %rax,$a1
1107
1108 mul $a0 # a[1]*a[0]
1109 mov %rax,$A0[0] # a[1]*a[0]
1110 mov $ai,%rax # a[2]
1111 mov %rdx,$A0[1]
1112 mov $A0[0],-24($tptr,$i) # t[1]
1113
1114 mul $a0 # a[2]*a[0]
1115 add %rax,$A0[1]
1116 mov $ai,%rax
1117 adc \$0,%rdx
1118 mov $A0[1],-16($tptr,$i) # t[2]
1119 mov %rdx,$A0[0]
1120
1121
1122 mov -8($aptr,$i),$ai # a[3]
1123 mul $a1 # a[2]*a[1]
1124 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1125 mov $ai,%rax
1126 mov %rdx,$A1[1]
1127
1128 lea ($i),$j
1129 mul $a0 # a[3]*a[0]
1130 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1131 mov $ai,%rax
1132 mov %rdx,$A0[1]
1133 adc \$0,$A0[1]
1134 add $A1[0],$A0[0]
1135 adc \$0,$A0[1]
1136 mov $A0[0],-8($tptr,$j) # t[3]
1137 jmp .Lsqr4x_1st
1138
1139.align 32
1140.Lsqr4x_1st:
1141 mov ($aptr,$j),$ai # a[4]
1142 mul $a1 # a[3]*a[1]
1143 add %rax,$A1[1] # a[3]*a[1]+t[4]
1144 mov $ai,%rax
1145 mov %rdx,$A1[0]
1146 adc \$0,$A1[0]
1147
1148 mul $a0 # a[4]*a[0]
1149 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1150 mov $ai,%rax # a[3]
1151 mov 8($aptr,$j),$ai # a[5]
1152 mov %rdx,$A0[0]
1153 adc \$0,$A0[0]
1154 add $A1[1],$A0[1]
1155 adc \$0,$A0[0]
1156
1157
1158 mul $a1 # a[4]*a[3]
1159 add %rax,$A1[0] # a[4]*a[3]+t[5]
1160 mov $ai,%rax
1161 mov $A0[1],($tptr,$j) # t[4]
1162 mov %rdx,$A1[1]
1163 adc \$0,$A1[1]
1164
1165 mul $a0 # a[5]*a[2]
1166 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1167 mov $ai,%rax
1168 mov 16($aptr,$j),$ai # a[6]
1169 mov %rdx,$A0[1]
1170 adc \$0,$A0[1]
1171 add $A1[0],$A0[0]
1172 adc \$0,$A0[1]
1173
1174 mul $a1 # a[5]*a[3]
1175 add %rax,$A1[1] # a[5]*a[3]+t[6]
1176 mov $ai,%rax
1177 mov $A0[0],8($tptr,$j) # t[5]
1178 mov %rdx,$A1[0]
1179 adc \$0,$A1[0]
1180
1181 mul $a0 # a[6]*a[2]
1182 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1183 mov $ai,%rax # a[3]
1184 mov 24($aptr,$j),$ai # a[7]
1185 mov %rdx,$A0[0]
1186 adc \$0,$A0[0]
1187 add $A1[1],$A0[1]
1188 adc \$0,$A0[0]
1189
1190
1191 mul $a1 # a[6]*a[5]
1192 add %rax,$A1[0] # a[6]*a[5]+t[7]
1193 mov $ai,%rax
1194 mov $A0[1],16($tptr,$j) # t[6]
1195 mov %rdx,$A1[1]
1196 adc \$0,$A1[1]
1197 lea 32($j),$j
1198
1199 mul $a0 # a[7]*a[4]
1200 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
1201 mov $ai,%rax
1202 mov %rdx,$A0[1]
1203 adc \$0,$A0[1]
1204 add $A1[0],$A0[0]
1205 adc \$0,$A0[1]
1206 mov $A0[0],-8($tptr,$j) # t[7]
1207
1208 cmp \$0,$j
1209 jne .Lsqr4x_1st
1210
1211 mul $a1 # a[7]*a[5]
1212 add %rax,$A1[1]
1213 lea 16($i),$i
1214 adc \$0,%rdx
1215 add $A0[1],$A1[1]
1216 adc \$0,%rdx
1217
1218 mov $A1[1],($tptr) # t[8]
1219 mov %rdx,$A1[0]
1220 mov %rdx,8($tptr) # t[9]
1221 jmp .Lsqr4x_outer
1222
1223.align 32
1224.Lsqr4x_outer: # comments apply to $num==6 case
1225 mov -32($aptr,$i),$a0 # a[0]
1226 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1227 mov -24($aptr,$i),%rax # a[1]
1228 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1229 mov -16($aptr,$i),$ai # a[2]
1230 mov %rax,$a1
1231
1232 mul $a0 # a[1]*a[0]
1233 mov -24($tptr,$i),$A0[0] # t[1]
1234 add %rax,$A0[0] # a[1]*a[0]+t[1]
1235 mov $ai,%rax # a[2]
1236 adc \$0,%rdx
1237 mov $A0[0],-24($tptr,$i) # t[1]
1238 mov %rdx,$A0[1]
1239
1240 mul $a0 # a[2]*a[0]
1241 add %rax,$A0[1]
1242 mov $ai,%rax
1243 adc \$0,%rdx
1244 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1245 mov %rdx,$A0[0]
1246 adc \$0,$A0[0]
1247 mov $A0[1],-16($tptr,$i) # t[2]
1248
1249 xor $A1[0],$A1[0]
1250
1251 mov -8($aptr,$i),$ai # a[3]
1252 mul $a1 # a[2]*a[1]
1253 add %rax,$A1[0] # a[2]*a[1]+t[3]
1254 mov $ai,%rax
1255 adc \$0,%rdx
1256 add -8($tptr,$i),$A1[0]
1257 mov %rdx,$A1[1]
1258 adc \$0,$A1[1]
1259
1260 mul $a0 # a[3]*a[0]
1261 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1262 mov $ai,%rax
1263 adc \$0,%rdx
1264 add $A1[0],$A0[0]
1265 mov %rdx,$A0[1]
1266 adc \$0,$A0[1]
1267 mov $A0[0],-8($tptr,$i) # t[3]
1268
1269 lea ($i),$j
1270 jmp .Lsqr4x_inner
1271
1272.align 32
1273.Lsqr4x_inner:
1274 mov ($aptr,$j),$ai # a[4]
1275 mul $a1 # a[3]*a[1]
1276 add %rax,$A1[1] # a[3]*a[1]+t[4]
1277 mov $ai,%rax
1278 mov %rdx,$A1[0]
1279 adc \$0,$A1[0]
1280 add ($tptr,$j),$A1[1]
1281 adc \$0,$A1[0]
1282
1283 .byte 0x67
1284 mul $a0 # a[4]*a[0]
1285 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1286 mov $ai,%rax # a[3]
1287 mov 8($aptr,$j),$ai # a[5]
1288 mov %rdx,$A0[0]
1289 adc \$0,$A0[0]
1290 add $A1[1],$A0[1]
1291 adc \$0,$A0[0]
1292
1293 mul $a1 # a[4]*a[3]
1294 add %rax,$A1[0] # a[4]*a[3]+t[5]
1295 mov $A0[1],($tptr,$j) # t[4]
1296 mov $ai,%rax
1297 mov %rdx,$A1[1]
1298 adc \$0,$A1[1]
1299 add 8($tptr,$j),$A1[0]
1300 lea 16($j),$j # j++
1301 adc \$0,$A1[1]
1302
1303 mul $a0 # a[5]*a[2]
1304 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1305 mov $ai,%rax
1306 adc \$0,%rdx
1307 add $A1[0],$A0[0]
1308 mov %rdx,$A0[1]
1309 adc \$0,$A0[1]
1310 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1311
1312 cmp \$0,$j
1313 jne .Lsqr4x_inner
1314
1315 .byte 0x67
1316 mul $a1 # a[5]*a[3]
1317 add %rax,$A1[1]
1318 adc \$0,%rdx
1319 add $A0[1],$A1[1]
1320 adc \$0,%rdx
1321
1322 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1323 mov %rdx,$A1[0]
1324 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1325
1326 add \$16,$i
1327 jnz .Lsqr4x_outer
1328
1329 # comments apply to $num==4 case
1330 mov -32($aptr),$a0 # a[0]
1331 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1332 mov -24($aptr),%rax # a[1]
1333 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1334 mov -16($aptr),$ai # a[2]
1335 mov %rax,$a1
1336
1337 mul $a0 # a[1]*a[0]
1338 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1339 mov $ai,%rax # a[2]
1340 mov %rdx,$A0[1]
1341 adc \$0,$A0[1]
1342
1343 mul $a0 # a[2]*a[0]
1344 add %rax,$A0[1]
1345 mov $ai,%rax
1346 mov $A0[0],-24($tptr) # t[1]
1347 mov %rdx,$A0[0]
1348 adc \$0,$A0[0]
1349 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1350 mov -8($aptr),$ai # a[3]
1351 adc \$0,$A0[0]
1352
1353 mul $a1 # a[2]*a[1]
1354 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1355 mov $ai,%rax
1356 mov $A0[1],-16($tptr) # t[2]
1357 mov %rdx,$A1[1]
1358 adc \$0,$A1[1]
1359
1360 mul $a0 # a[3]*a[0]
1361 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1362 mov $ai,%rax
1363 mov %rdx,$A0[1]
1364 adc \$0,$A0[1]
1365 add $A1[0],$A0[0]
1366 adc \$0,$A0[1]
1367 mov $A0[0],-8($tptr) # t[3]
1368
1369 mul $a1 # a[3]*a[1]
1370 add %rax,$A1[1]
1371 mov -16($aptr),%rax # a[2]
1372 adc \$0,%rdx
1373 add $A0[1],$A1[1]
1374 adc \$0,%rdx
1375
1376 mov $A1[1],($tptr) # t[4]
1377 mov %rdx,$A1[0]
1378 mov %rdx,8($tptr) # t[5]
1379
1380 mul $ai # a[2]*a[3]
1381___
1382{
1383my ($shift,$carry)=($a0,$a1);
1384my @S=(@A1,$ai,$n0);
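# Step b) from the top of bn_sqr8x_internal: walk t[] a pair of limbs at a
# time, double each pair with the lea/shr-63 chains (the bit shifted out of
# one pair is carried into the next via $shift), and add the freshly
# computed diagonal a[i]*a[i] product into the doubled value.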
1385$code.=<<___;
1386 add \$16,$i
1387 xor $shift,$shift
1388 sub $num,$i # $i=16-$num
1389 xor $carry,$carry
1390
1391 add $A1[0],%rax # t[5]
1392 adc \$0,%rdx
1393 mov %rax,8($tptr) # t[5]
1394 mov %rdx,16($tptr) # t[6]
1395 mov $carry,24($tptr) # t[7]
1396
1397 mov -16($aptr,$i),%rax # a[0]
1398 lea 48+8(%rsp),$tptr
1399 xor $A0[0],$A0[0] # t[0]
1400 mov 8($tptr),$A0[1] # t[1]
1401
1402 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1403 shr \$63,$A0[0]
1404 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1405 shr \$63,$A0[1]
1406 or $A0[0],$S[1] # | t[2*i]>>63
1407 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1408 mov $A0[1],$shift # shift=t[2*i+1]>>63
1409 mul %rax # a[i]*a[i]
1410 neg $carry # mov $carry,cf
1411 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1412 adc %rax,$S[0]
1413 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1414 mov $S[0],($tptr)
1415 adc %rdx,$S[1]
1416
1417 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1418 mov $S[1],8($tptr)
1419 sbb $carry,$carry # mov cf,$carry
1420 shr \$63,$A0[0]
1421 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1422 shr \$63,$A0[1]
1423 or $A0[0],$S[3] # | t[2*i]>>63
1424 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1425 mov $A0[1],$shift # shift=t[2*i+1]>>63
1426 mul %rax # a[i]*a[i]
1427 neg $carry # mov $carry,cf
1428 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1429 adc %rax,$S[2]
1430 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1431 mov $S[2],16($tptr)
1432 adc %rdx,$S[3]
1433 lea 16($i),$i
1434 mov $S[3],24($tptr)
1435 sbb $carry,$carry # mov cf,$carry
1436 lea 64($tptr),$tptr
1437 jmp .Lsqr4x_shift_n_add
1438
1439.align 32
1440.Lsqr4x_shift_n_add:
1441 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1442 shr \$63,$A0[0]
1443 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1444 shr \$63,$A0[1]
1445 or $A0[0],$S[1] # | t[2*i]>>63
1446 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1447 mov $A0[1],$shift # shift=t[2*i+1]>>63
1448 mul %rax # a[i]*a[i]
1449 neg $carry # mov $carry,cf
1450 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1451 adc %rax,$S[0]
1452 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1453 mov $S[0],-32($tptr)
1454 adc %rdx,$S[1]
1455
1456 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1457 mov $S[1],-24($tptr)
1458 sbb $carry,$carry # mov cf,$carry
1459 shr \$63,$A0[0]
1460 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1461 shr \$63,$A0[1]
1462 or $A0[0],$S[3] # | t[2*i]>>63
1463 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1464 mov $A0[1],$shift # shift=t[2*i+1]>>63
1465 mul %rax # a[i]*a[i]
1466 neg $carry # mov $carry,cf
1467 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1468 adc %rax,$S[2]
1469 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1470 mov $S[2],-16($tptr)
1471 adc %rdx,$S[3]
1472
1473 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1474 mov $S[3],-8($tptr)
1475 sbb $carry,$carry # mov cf,$carry
1476 shr \$63,$A0[0]
1477 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1478 shr \$63,$A0[1]
1479 or $A0[0],$S[1] # | t[2*i]>>63
1480 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1481 mov $A0[1],$shift # shift=t[2*i+1]>>63
1482 mul %rax # a[i]*a[i]
1483 neg $carry # mov $carry,cf
1484 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1485 adc %rax,$S[0]
1486 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1487 mov $S[0],0($tptr)
1488 adc %rdx,$S[1]
1489
1490 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1491 mov $S[1],8($tptr)
1492 sbb $carry,$carry # mov cf,$carry
1493 shr \$63,$A0[0]
1494 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1495 shr \$63,$A0[1]
1496 or $A0[0],$S[3] # | t[2*i]>>63
1497 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1498 mov $A0[1],$shift # shift=t[2*i+1]>>63
1499 mul %rax # a[i]*a[i]
1500 neg $carry # mov $carry,cf
1501 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1502 adc %rax,$S[2]
1503 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1504 mov $S[2],16($tptr)
1505 adc %rdx,$S[3]
1506 mov $S[3],24($tptr)
1507 sbb $carry,$carry # mov cf,$carry
1508 lea 64($tptr),$tptr
1509 add \$32,$i
1510 jnz .Lsqr4x_shift_n_add
1511
1512 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1513 .byte 0x67
1514 shr \$63,$A0[0]
1515 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1516 shr \$63,$A0[1]
1517 or $A0[0],$S[1] # | t[2*i]>>63
1518 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1519 mov $A0[1],$shift # shift=t[2*i+1]>>63
1520 mul %rax # a[i]*a[i]
1521 neg $carry # mov $carry,cf
1522 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1523 adc %rax,$S[0]
1524 mov -8($aptr),%rax # a[i+1] # prefetch
1525 mov $S[0],-32($tptr)
1526 adc %rdx,$S[1]
1527
1528 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1529 mov $S[1],-24($tptr)
1530 sbb $carry,$carry # mov cf,$carry
1531 shr \$63,$A0[0]
1532 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1533 shr \$63,$A0[1]
1534 or $A0[0],$S[3] # | t[2*i]>>63
1535 mul %rax # a[i]*a[i]
1536 neg $carry # mov $carry,cf
1537 adc %rax,$S[2]
1538 adc %rdx,$S[3]
1539 mov $S[2],-16($tptr)
1540 mov $S[3],-8($tptr)
1541___
1542}
1543######################################################################
1544# Montgomery reduction part, "word-by-word" algorithm.
1545#
1546# This new path is inspired by multiple submissions from Intel, by
1547# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1548# Vinodh Gopal...
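#
# The per-word step is the textbook Montgomery recurrence: for each limb
# of t,
#
#	m = t[0] * n0 mod 2^64
#	t = (t + m*n) >> 64		# low limb cancels and is dropped
#
# performed below in chunks of eight limbs against the zero-interleaved n[].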
1549{
1550my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1551
1552$code.=<<___;
1553 movq %xmm2,$nptr
1554sqr8x_reduction:
1555 xor %rax,%rax
1556 lea ($nptr,$num,2),%rcx # end of n[]
1557 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1558 mov %rcx,0+8(%rsp)
1559 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1560 mov %rdx,8+8(%rsp)
1561 neg $num
1562 jmp .L8x_reduction_loop
1563
1564.align 32
1565.L8x_reduction_loop:
1566 lea ($tptr,$num),$tptr # start of current t[] window
1567 .byte 0x66
1568 mov 8*0($tptr),$m0
1569 mov 8*1($tptr),%r9
1570 mov 8*2($tptr),%r10
1571 mov 8*3($tptr),%r11
1572 mov 8*4($tptr),%r12
1573 mov 8*5($tptr),%r13
1574 mov 8*6($tptr),%r14
1575 mov 8*7($tptr),%r15
1576 mov %rax,(%rdx) # store top-most carry bit
1577 lea 8*8($tptr),$tptr
1578
1579 .byte 0x67
1580 mov $m0,%r8
1581 imulq 32+8(%rsp),$m0 # n0*a[0]
1582 mov 16*0($nptr),%rax # n[0]
1583 mov \$8,%ecx
1584 jmp .L8x_reduce
1585
1586.align 32
1587.L8x_reduce:
1588 mulq $m0
1589 mov 16*1($nptr),%rax # n[1]
1590 neg %r8
1591 mov %rdx,%r8
1592 adc \$0,%r8
1593
1594 mulq $m0
1595 add %rax,%r9
1596 mov 16*2($nptr),%rax
1597 adc \$0,%rdx
1598 add %r9,%r8
1599 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1600 mov %rdx,%r9
1601 adc \$0,%r9
1602
1603 mulq $m0
1604 add %rax,%r10
1605 mov 16*3($nptr),%rax
1606 adc \$0,%rdx
1607 add %r10,%r9
1608 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1609 mov %rdx,%r10
1610 adc \$0,%r10
1611
1612 mulq $m0
1613 add %rax,%r11
1614 mov 16*4($nptr),%rax
1615 adc \$0,%rdx
1616 imulq %r8,$carry # modulo-scheduled
1617 add %r11,%r10
1618 mov %rdx,%r11
1619 adc \$0,%r11
1620
1621 mulq $m0
1622 add %rax,%r12
1623 mov 16*5($nptr),%rax
1624 adc \$0,%rdx
1625 add %r12,%r11
1626 mov %rdx,%r12
1627 adc \$0,%r12
1628
1629 mulq $m0
1630 add %rax,%r13
1631 mov 16*6($nptr),%rax
1632 adc \$0,%rdx
1633 add %r13,%r12
1634 mov %rdx,%r13
1635 adc \$0,%r13
1636
1637 mulq $m0
1638 add %rax,%r14
1639 mov 16*7($nptr),%rax
1640 adc \$0,%rdx
1641 add %r14,%r13
1642 mov %rdx,%r14
1643 adc \$0,%r14
1644
1645 mulq $m0
1646 mov $carry,$m0 # n0*a[i]
1647 add %rax,%r15
1648 mov 16*0($nptr),%rax # n[0]
1649 adc \$0,%rdx
1650 add %r15,%r14
1651 mov %rdx,%r15
1652 adc \$0,%r15
1653
1654 dec %ecx
1655 jnz .L8x_reduce
1656
1657 lea 16*8($nptr),$nptr
1658 xor %rax,%rax
1659 mov 8+8(%rsp),%rdx # pull end of t[]
1660 cmp 0+8(%rsp),$nptr # end of n[]?
1661 jae .L8x_no_tail
1662
1663 .byte 0x66
1664 add 8*0($tptr),%r8
1665 adc 8*1($tptr),%r9
1666 adc 8*2($tptr),%r10
1667 adc 8*3($tptr),%r11
1668 adc 8*4($tptr),%r12
1669 adc 8*5($tptr),%r13
1670 adc 8*6($tptr),%r14
1671 adc 8*7($tptr),%r15
1672 sbb $carry,$carry # top carry
1673
1674 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1675 mov \$8,%ecx
1676 mov 16*0($nptr),%rax
1677 jmp .L8x_tail
1678
1679.align 32
1680.L8x_tail:
1681 mulq $m0
1682 add %rax,%r8
1683 mov 16*1($nptr),%rax
1684 mov %r8,($tptr) # save result
1685 mov %rdx,%r8
1686 adc \$0,%r8
1687
1688 mulq $m0
1689 add %rax,%r9
1690 mov 16*2($nptr),%rax
1691 adc \$0,%rdx
1692 add %r9,%r8
1693 lea 8($tptr),$tptr # $tptr++
1694 mov %rdx,%r9
1695 adc \$0,%r9
1696
1697 mulq $m0
1698 add %rax,%r10
1699 mov 16*3($nptr),%rax
1700 adc \$0,%rdx
1701 add %r10,%r9
1702 mov %rdx,%r10
1703 adc \$0,%r10
1704
1705 mulq $m0
1706 add %rax,%r11
1707 mov 16*4($nptr),%rax
1708 adc \$0,%rdx
1709 add %r11,%r10
1710 mov %rdx,%r11
1711 adc \$0,%r11
1712
1713 mulq $m0
1714 add %rax,%r12
1715 mov 16*5($nptr),%rax
1716 adc \$0,%rdx
1717 add %r12,%r11
1718 mov %rdx,%r12
1719 adc \$0,%r12
1720
1721 mulq $m0
1722 add %rax,%r13
1723 mov 16*6($nptr),%rax
1724 adc \$0,%rdx
1725 add %r13,%r12
1726 mov %rdx,%r13
1727 adc \$0,%r13
1728
1729 mulq $m0
1730 add %rax,%r14
1731 mov 16*7($nptr),%rax
1732 adc \$0,%rdx
1733 add %r14,%r13
1734 mov %rdx,%r14
1735 adc \$0,%r14
1736
1737 mulq $m0
1738 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1739 add %rax,%r15
1740 adc \$0,%rdx
1741 add %r15,%r14
1742 mov 16*0($nptr),%rax # pull n[0]
1743 mov %rdx,%r15
1744 adc \$0,%r15
1745
1746 dec %ecx
1747 jnz .L8x_tail
1748
1749 lea 16*8($nptr),$nptr
1750 mov 8+8(%rsp),%rdx # pull end of t[]
1751 cmp 0+8(%rsp),$nptr # end of n[]?
1752 jae .L8x_tail_done # break out of loop
1753
1754 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1755 neg $carry
1756 mov 8*0($nptr),%rax # pull n[0]
1757 adc 8*0($tptr),%r8
1758 adc 8*1($tptr),%r9
1759 adc 8*2($tptr),%r10
1760 adc 8*3($tptr),%r11
1761 adc 8*4($tptr),%r12
1762 adc 8*5($tptr),%r13
1763 adc 8*6($tptr),%r14
1764 adc 8*7($tptr),%r15
1765 sbb $carry,$carry # top carry
1766
1767 mov \$8,%ecx
1768 jmp .L8x_tail
1769
1770.align 32
1771.L8x_tail_done:
1772 add (%rdx),%r8 # can this overflow?
1773 xor %rax,%rax
1774
1775 neg $carry
1776.L8x_no_tail:
1777 adc 8*0($tptr),%r8
1778 adc 8*1($tptr),%r9
1779 adc 8*2($tptr),%r10
1780 adc 8*3($tptr),%r11
1781 adc 8*4($tptr),%r12
1782 adc 8*5($tptr),%r13
1783 adc 8*6($tptr),%r14
1784 adc 8*7($tptr),%r15
1785 adc \$0,%rax # top-most carry
1786 mov -16($nptr),%rcx # np[num-1]
1787 xor $carry,$carry
1788
1789 movq %xmm2,$nptr # restore $nptr
1790
1791 mov %r8,8*0($tptr) # store top 512 bits
1792 mov %r9,8*1($tptr)
1793 movq %xmm3,$num # $num is %r9, can't be moved upwards
1794 mov %r10,8*2($tptr)
1795 mov %r11,8*3($tptr)
1796 mov %r12,8*4($tptr)
1797 mov %r13,8*5($tptr)
1798 mov %r14,8*6($tptr)
1799 mov %r15,8*7($tptr)
1800 lea 8*8($tptr),$tptr
1801
1802 cmp %rdx,$tptr # end of t[]?
1803 jb .L8x_reduction_loop
1804___
1805}
1806##############################################################
1807# Post-condition, 4x unrolled
1808#
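# The conditional "subtract n at most once" step is branch-free: n[] is
# interleaved with zero words (see the August 2013 note at the top), so
# bumping $nptr forward by 8 bytes makes the sbb chain in .Lsqr4x_sub
# subtract zeros instead of the modulus. Which of the two happens is
# decided only by the lea ($nptr,%rax,8) below, never by a branch.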
1809{
1810my ($tptr,$nptr)=("%rbx","%rbp");
1811$code.=<<___;
1812 #xor %rsi,%rsi # %rsi was $carry above
1813 sub %r15,%rcx # compare top-most words
1814 lea (%rdi,$num),$tptr # %rdi was $tptr above
1815 adc %rsi,%rsi
1816 mov $num,%rcx
1817 or %rsi,%rax
1818 movq %xmm1,$rptr # restore $rptr
1819 xor \$1,%rax
1820 movq %xmm1,$aptr # prepare for back-to-back call
1821 lea ($nptr,%rax,8),$nptr
1822 sar \$3+2,%rcx # cf=0
1823 jmp .Lsqr4x_sub
1824
1825.align 32
1826.Lsqr4x_sub:
1827 .byte 0x66
1828 mov 8*0($tptr),%r12
1829 mov 8*1($tptr),%r13
1830 sbb 16*0($nptr),%r12
1831 mov 8*2($tptr),%r14
1832 sbb 16*1($nptr),%r13
1833 mov 8*3($tptr),%r15
1834 lea 8*4($tptr),$tptr
1835 sbb 16*2($nptr),%r14
1836 mov %r12,8*0($rptr)
1837 sbb 16*3($nptr),%r15
1838 lea 16*4($nptr),$nptr
1839 mov %r13,8*1($rptr)
1840 mov %r14,8*2($rptr)
1841 mov %r15,8*3($rptr)
1842 lea 8*4($rptr),$rptr
1843
1844 inc %rcx # pass %cf
1845 jnz .Lsqr4x_sub
1846___
1847}
1848$code.=<<___;
1849 mov $num,%r10 # prepare for back-to-back call
1850 neg $num # restore $num
1851 ret
1852.size bn_sqr8x_internal,.-bn_sqr8x_internal
1853___
1854{
1855$code.=<<___;
1856.globl bn_from_montgomery
1857.type bn_from_montgomery,\@abi-omnipotent
1858.align 32
1859bn_from_montgomery:
1860 testl \$7,`($win64?"48(%rsp)":"%r9d")`
1861 jz bn_from_mont8x
1862 xor %eax,%eax
1863 ret
1864.size bn_from_montgomery,.-bn_from_montgomery
1865
1866.type bn_from_mont8x,\@function,6
1867.align 32
1868bn_from_mont8x:
1869 .byte 0x67
1870 mov %rsp,%rax
1871 push %rbx
1872 push %rbp
1873 push %r12
1874 push %r13
1875 push %r14
1876 push %r15
1877___
1878$code.=<<___ if ($win64);
1879 lea -0x28(%rsp),%rsp
1880 movaps %xmm6,(%rsp)
1881 movaps %xmm7,0x10(%rsp)
1882___
1883$code.=<<___;
1884 .byte 0x67
1885 mov ${num}d,%r10d
1886 shl \$3,${num}d # convert $num to bytes
1887 shl \$3+2,%r10d # 4*$num
1888 neg $num
1889 mov ($n0),$n0 # *n0
1890
1891 ##############################################################
1892 # ensure that stack frame doesn't alias with $aptr+4*$num
1893 # modulo 4096, which covers ret[num], am[num] and n[2*num]
1894 # (see bn_exp.c). this is done to allow memory disambiguation
1895 # logic do its magic.
1896 #
1897 lea -64(%rsp,$num,2),%r11
1898 sub $aptr,%r11
1899 and \$4095,%r11
1900 cmp %r11,%r10
1901 jb .Lfrom_sp_alt
1902 sub %r11,%rsp # align with $aptr
1903 lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
1904 jmp .Lfrom_sp_done
1905
1906.align 32
1907.Lfrom_sp_alt:
1908 lea 4096-64(,$num,2),%r10 # 4096-frame-2*$num
1909 lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
1910 sub %r10,%r11
1911 mov \$0,%r10
1912 cmovc %r10,%r11
1913 sub %r11,%rsp
1914.Lfrom_sp_done:
1915 and \$-64,%rsp
1916 mov $num,%r10
1917 neg $num
1918
1919 ##############################################################
1920 # Stack layout
1921 #
1922 # +0 saved $num, used in reduction section
1923 # +8 &t[2*$num], used in reduction section
1924 # +32 saved *n0
1925 # +40 saved %rsp
1926 # +48 t[2*$num]
1927 #
1928 mov $n0, 32(%rsp)
1929 mov %rax, 40(%rsp) # save original %rsp
1930.Lfrom_body:
1931 mov $num,%r11
1932 lea 48(%rsp),%rax
1933 pxor %xmm0,%xmm0
1934 jmp .Lmul_by_1
1935
1936.align 32
1937.Lmul_by_1:
1938 movdqu ($aptr),%xmm1
1939 movdqu 16($aptr),%xmm2
1940 movdqu 32($aptr),%xmm3
1941 movdqa %xmm0,(%rax,$num)
1942 movdqu 48($aptr),%xmm4
1943 movdqa %xmm0,16(%rax,$num)
1944 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
1945 movdqa %xmm1,(%rax)
1946 movdqa %xmm0,32(%rax,$num)
1947 movdqa %xmm2,16(%rax)
1948 movdqa %xmm0,48(%rax,$num)
1949 movdqa %xmm3,32(%rax)
1950 movdqa %xmm4,48(%rax)
1951 lea 64(%rax),%rax
1952 sub \$64,%r11
1953 jnz .Lmul_by_1
1954
1955 movq $rptr,%xmm1
1956 movq $nptr,%xmm2
1957 .byte 0x67
1958 mov $nptr,%rbp
1959 movq %r10, %xmm3 # -num
1960___
1961$code.=<<___ if ($addx);
1962 mov OPENSSL_ia32cap_P+8(%rip),%r11d
1963 and \$0x80100,%r11d
1964 cmp \$0x80100,%r11d
1965 jne .Lfrom_mont_nox
1966
1967 lea (%rax,$num),$rptr
1968 call sqrx8x_reduction
1969
1970 pxor %xmm0,%xmm0
1971 lea 48(%rsp),%rax
1972 mov 40(%rsp),%rsi # restore %rsp
1973 jmp .Lfrom_mont_zero
1974
1975.align 32
1976.Lfrom_mont_nox:
1977___
1978$code.=<<___;
1979 call sqr8x_reduction
1980
1981 pxor %xmm0,%xmm0
1982 lea 48(%rsp),%rax
1983 mov 40(%rsp),%rsi # restore %rsp
1984 jmp .Lfrom_mont_zero
1985
1986.align 32
1987.Lfrom_mont_zero:
1988 movdqa %xmm0,16*0(%rax)
1989 movdqa %xmm0,16*1(%rax)
1990 movdqa %xmm0,16*2(%rax)
1991 movdqa %xmm0,16*3(%rax)
1992 lea 16*4(%rax),%rax
1993 sub \$32,$num
1994 jnz .Lfrom_mont_zero
1995
1996 mov \$1,%rax
1997 mov -48(%rsi),%r15
1998 mov -40(%rsi),%r14
1999 mov -32(%rsi),%r13
2000 mov -24(%rsi),%r12
2001 mov -16(%rsi),%rbp
2002 mov -8(%rsi),%rbx
2003 lea (%rsi),%rsp
2004.Lfrom_epilogue:
2005 ret
2006.size bn_from_mont8x,.-bn_from_mont8x
2007___
2008}
2009}}}
2010
2011if ($addx) {{{
2012my $bp="%rdx"; # restore original value
2013
2014$code.=<<___;
2015.type bn_mulx4x_mont_gather5,\@function,6
2016.align 32
2017bn_mulx4x_mont_gather5:
2018.Lmulx4x_enter:
2019 .byte 0x67
2020 mov %rsp,%rax
2021 push %rbx
2022 push %rbp
2023 push %r12
2024 push %r13
2025 push %r14
2026 push %r15
2027___
2028$code.=<<___ if ($win64);
2029 lea -0x28(%rsp),%rsp
2030 movaps %xmm6,(%rsp)
2031 movaps %xmm7,0x10(%rsp)
2032___
2033$code.=<<___;
2034 .byte 0x67
2035 mov ${num}d,%r10d
2036 shl \$3,${num}d # convert $num to bytes
2037 shl \$3+2,%r10d # 4*$num
2038 neg $num # -$num
2039 mov ($n0),$n0 # *n0
2040
2041 ##############################################################
2042 # ensure that stack frame doesn't alias with $aptr+4*$num
2043 # modulo 4096, which covers a[num], ret[num] and n[2*num]
2044 # (see bn_exp.c). this is done to allow memory disambiguation
2045 # logic do its magic. [excessive frame is allocated in order
2046 # to allow bn_from_mont8x to clear it.]
2047 #
2048 lea -64(%rsp,$num,2),%r11
2049 sub $ap,%r11
2050 and \$4095,%r11
2051 cmp %r11,%r10
2052 jb .Lmulx4xsp_alt
2053 sub %r11,%rsp # align with $aptr
2054 lea -64(%rsp,$num,2),%rsp # alloca(frame+$num)
2055 jmp .Lmulx4xsp_done
2056
2057.align 32
2058.Lmulx4xsp_alt:
2059 lea 4096-64(,$num,2),%r10 # 4096-frame-$num
2060 lea -64(%rsp,$num,2),%rsp # alloca(frame+$num)
2061 sub %r10,%r11
2062 mov \$0,%r10
2063 cmovc %r10,%r11
2064 sub %r11,%rsp
2065.Lmulx4xsp_done:
2066 and \$-64,%rsp # ensure alignment
2067 ##############################################################
2068 # Stack layout
2069 # +0 -num
2070 # +8 off-loaded &b[i]
2071 # +16 end of b[num]
2072 # +24 inner counter
2073 # +32 saved n0
2074 # +40 saved %rsp
2075 # +48
2076 # +56 saved rp
2077 # +64 tmp[num+1]
2078 #
2079 mov $n0, 32(%rsp) # save *n0
2080 mov %rax,40(%rsp) # save original %rsp
2081.Lmulx4x_body:
2082 call mulx4x_internal
2083
2084 mov 40(%rsp),%rsi # restore %rsp
2085 mov \$1,%rax
2086___
2087$code.=<<___ if ($win64);
2088 movaps -88(%rsi),%xmm6
2089 movaps -72(%rsi),%xmm7
2090___
2091$code.=<<___;
2092 mov -48(%rsi),%r15
2093 mov -40(%rsi),%r14
2094 mov -32(%rsi),%r13
2095 mov -24(%rsi),%r12
2096 mov -16(%rsi),%rbp
2097 mov -8(%rsi),%rbx
2098 lea (%rsi),%rsp
2099.Lmulx4x_epilogue:
2100 ret
2101.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2102
2103.type mulx4x_internal,\@abi-omnipotent
2104.align 32
2105mulx4x_internal:
2106 .byte 0x4c,0x89,0x8c,0x24,0x08,0x00,0x00,0x00 # mov $num,8(%rsp) # save -$num
2107 .byte 0x67
2108 neg $num # restore $num
2109 shl \$5,$num
2110 lea 256($bp,$num),%r13
2111 shr \$5+5,$num
2112 mov `($win64?56:8)`(%rax),%r10d # load 7th argument
2113 sub \$1,$num
2114 mov %r13,16+8(%rsp) # end of b[num]
2115 mov $num,24+8(%rsp) # inner counter
2116 mov $rp, 56+8(%rsp) # save $rp
2117___
2118my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2119 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2120my $rptr=$bptr;
2121my $STRIDE=2**5*8; # 5 is "window size"
2122my $N=$STRIDE/4; # should match cache line size
2123$code.=<<___;
2124 mov %r10,%r11
2125 shr \$`log($N/8)/log(2)`,%r10
2126 and \$`$N/8-1`,%r11
2127 not %r10
2128 lea .Lmagic_masks(%rip),%rax
2129 and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
2130 lea 96($bp,%r11,8),$bptr # pointer within 1st cache line
2131 movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
2132 movq 8(%rax,%r10,8),%xmm5 # cache line contains element
2133 add \$7,%r11
2134 movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
2135 movq 24(%rax,%r10,8),%xmm7
2136 and \$7,%r11
2137
2138 movq `0*$STRIDE/4-96`($bptr),%xmm0
2139 lea $STRIDE($bptr),$tptr # borrow $tptr
2140 movq `1*$STRIDE/4-96`($bptr),%xmm1
2141 pand %xmm4,%xmm0
2142 movq `2*$STRIDE/4-96`($bptr),%xmm2
2143 pand %xmm5,%xmm1
2144 movq `3*$STRIDE/4-96`($bptr),%xmm3
2145 pand %xmm6,%xmm2
2146 por %xmm1,%xmm0
2147 movq `0*$STRIDE/4-96`($tptr),%xmm1
2148 pand %xmm7,%xmm3
2149 por %xmm2,%xmm0
2150 movq `1*$STRIDE/4-96`($tptr),%xmm2
2151 por %xmm3,%xmm0
2152 .byte 0x67,0x67
2153 pand %xmm4,%xmm1
2154 movq `2*$STRIDE/4-96`($tptr),%xmm3
2155
2156 movq %xmm0,%rdx # bp[0]
2157 movq `3*$STRIDE/4-96`($tptr),%xmm0
2158 lea 2*$STRIDE($bptr),$bptr # next &b[i]
2159 pand %xmm5,%xmm2
2160 .byte 0x67,0x67
2161 pand %xmm6,%xmm3
2162 ##############################################################
2163 # $tptr is chosen so that writing to top-most element of the
2164 # vector occurs just "above" references to powers table,
2165 # "above" modulo cache-line size, which effectively precludes
2166 # possibility of memory disambiguation logic failure when
2167 # accessing the table.
2168 #
2169 lea 64+8*4+8(%rsp,%r11,8),$tptr
2170
2171 mov %rdx,$bi
2172 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2173 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2174 add %rax,%r11
2175 mulx 2*8($aptr),%rax,%r13 # ...
2176 adc %rax,%r12
2177 adc \$0,%r13
2178 mulx 3*8($aptr),%rax,%r14
2179
2180 mov $mi,%r15
2181 imulq 32+8(%rsp),$mi # "t[0]"*n0
2182 xor $zero,$zero # cf=0, of=0
2183 mov $mi,%rdx
2184
2185 por %xmm2,%xmm1
2186 pand %xmm7,%xmm0
2187 por %xmm3,%xmm1
2188 mov $bptr,8+8(%rsp) # off-load &b[i]
2189 por %xmm1,%xmm0
2190
2191 .byte 0x48,0x8d,0xb6,0x20,0x00,0x00,0x00 # lea 4*8($aptr),$aptr
2192 adcx %rax,%r13
2193 adcx $zero,%r14 # cf=0
2194
2195 mulx 0*16($nptr),%rax,%r10
2196 adcx %rax,%r15 # discarded
2197 adox %r11,%r10
2198 mulx 1*16($nptr),%rax,%r11
2199 adcx %rax,%r10
2200 adox %r12,%r11
2201 mulx 2*16($nptr),%rax,%r12
2202 mov 24+8(%rsp),$bptr # counter value
2203 .byte 0x66
2204 mov %r10,-8*4($tptr)
2205 adcx %rax,%r11
2206 adox %r13,%r12
2207 mulx 3*16($nptr),%rax,%r15
2208 .byte 0x67,0x67
2209 mov $bi,%rdx
2210 mov %r11,-8*3($tptr)
2211 adcx %rax,%r12
2212 adox $zero,%r15 # of=0
2213 .byte 0x48,0x8d,0x89,0x40,0x00,0x00,0x00 # lea 4*16($nptr),$nptr
2214 mov %r12,-8*2($tptr)
2215 #jmp .Lmulx4x_1st
2216
2217.align 32
2218.Lmulx4x_1st:
2219 adcx $zero,%r15 # cf=0, modulo-scheduled
2220 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2221 adcx %r14,%r10
2222 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2223 adcx %rax,%r11
2224 mulx 2*8($aptr),%r12,%rax # ...
2225 adcx %r14,%r12
2226 mulx 3*8($aptr),%r13,%r14
2227 .byte 0x67,0x67
2228 mov $mi,%rdx
2229 adcx %rax,%r13
2230 adcx $zero,%r14 # cf=0
2231 lea 4*8($aptr),$aptr
2232 lea 4*8($tptr),$tptr
2233
2234 adox %r15,%r10
2235 mulx 0*16($nptr),%rax,%r15
2236 adcx %rax,%r10
2237 adox %r15,%r11
2238 mulx 1*16($nptr),%rax,%r15
2239 adcx %rax,%r11
2240 adox %r15,%r12
2241 mulx 2*16($nptr),%rax,%r15
2242 mov %r10,-5*8($tptr)
2243 adcx %rax,%r12
2244 mov %r11,-4*8($tptr)
2245 adox %r15,%r13
2246 mulx 3*16($nptr),%rax,%r15
2247 mov $bi,%rdx
2248 mov %r12,-3*8($tptr)
2249 adcx %rax,%r13
2250 adox $zero,%r15
2251 lea 4*16($nptr),$nptr
2252 mov %r13,-2*8($tptr)
2253
2254 dec $bptr # of=0, pass cf
2255 jnz .Lmulx4x_1st
2256
2257 mov 8(%rsp),$num # load -num
2258 movq %xmm0,%rdx # bp[1]
2259 adc $zero,%r15 # modulo-scheduled
2260 lea ($aptr,$num),$aptr # rewind $aptr
2261 add %r15,%r14
2262 mov 8+8(%rsp),$bptr # re-load &b[i]
2263 adc $zero,$zero # top-most carry
2264 mov %r14,-1*8($tptr)
2265 jmp .Lmulx4x_outer
2266
2267.align 32
2268.Lmulx4x_outer:
2269 mov $zero,($tptr) # save top-most carry
2270 lea 4*8($tptr,$num),$tptr # rewind $tptr
2271 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2272 xor $zero,$zero # cf=0, of=0
2273 mov %rdx,$bi
2274 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2275 adox -4*8($tptr),$mi # +t[0]
2276 adcx %r14,%r11
2277 mulx 2*8($aptr),%r15,%r13 # ...
2278 adox -3*8($tptr),%r11
2279 adcx %r15,%r12
2280 mulx 3*8($aptr),%rdx,%r14
2281 adox -2*8($tptr),%r12
2282 adcx %rdx,%r13
2283 lea ($nptr,$num,2),$nptr # rewind $nptr
2284 lea 4*8($aptr),$aptr
2285 adox -1*8($tptr),%r13
2286 adcx $zero,%r14
2287 adox $zero,%r14
2288
2289 .byte 0x67
2290 mov $mi,%r15
2291 imulq 32+8(%rsp),$mi # "t[0]"*n0
2292
2293 movq `0*$STRIDE/4-96`($bptr),%xmm0
2294 .byte 0x67,0x67
2295 mov $mi,%rdx
2296 movq `1*$STRIDE/4-96`($bptr),%xmm1
2297 .byte 0x67
2298 pand %xmm4,%xmm0
2299 movq `2*$STRIDE/4-96`($bptr),%xmm2
2300 .byte 0x67
2301 pand %xmm5,%xmm1
2302 movq `3*$STRIDE/4-96`($bptr),%xmm3
2303 add \$$STRIDE,$bptr # next &b[i]
2304 .byte 0x67
2305 pand %xmm6,%xmm2
2306 por %xmm1,%xmm0
2307 pand %xmm7,%xmm3
2308 xor $zero,$zero # cf=0, of=0
2309 mov $bptr,8+8(%rsp) # off-load &b[i]
2310
2311 mulx 0*16($nptr),%rax,%r10
2312 adcx %rax,%r15 # discarded
2313 adox %r11,%r10
2314 mulx 1*16($nptr),%rax,%r11
2315 adcx %rax,%r10
2316 adox %r12,%r11
2317 mulx 2*16($nptr),%rax,%r12
2318 adcx %rax,%r11
2319 adox %r13,%r12
2320 mulx 3*16($nptr),%rax,%r15
2321 mov $bi,%rdx
2322 por %xmm2,%xmm0
2323 mov 24+8(%rsp),$bptr # counter value
2324 mov %r10,-8*4($tptr)
2325 por %xmm3,%xmm0
2326 adcx %rax,%r12
2327 mov %r11,-8*3($tptr)
2328 adox $zero,%r15 # of=0
2329 mov %r12,-8*2($tptr)
2330 lea 4*16($nptr),$nptr
2331 jmp .Lmulx4x_inner
2332
2333.align 32
2334.Lmulx4x_inner:
2335 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2336 adcx $zero,%r15 # cf=0, modulo-scheduled
2337 adox %r14,%r10
2338 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2339 adcx 0*8($tptr),%r10
2340 adox %rax,%r11
2341 mulx 2*8($aptr),%r12,%rax # ...
2342 adcx 1*8($tptr),%r11
2343 adox %r14,%r12
2344 mulx 3*8($aptr),%r13,%r14
2345 mov $mi,%rdx
2346 adcx 2*8($tptr),%r12
2347 adox %rax,%r13
2348 adcx 3*8($tptr),%r13
2349 adox $zero,%r14 # of=0
2350 lea 4*8($aptr),$aptr
2351 lea 4*8($tptr),$tptr
2352 adcx $zero,%r14 # cf=0
2353
2354 adox %r15,%r10
2355 mulx 0*16($nptr),%rax,%r15
2356 adcx %rax,%r10
2357 adox %r15,%r11
2358 mulx 1*16($nptr),%rax,%r15
2359 adcx %rax,%r11
2360 adox %r15,%r12
2361 mulx 2*16($nptr),%rax,%r15
2362 mov %r10,-5*8($tptr)
2363 adcx %rax,%r12
2364 adox %r15,%r13
2365 mov %r11,-4*8($tptr)
2366 mulx 3*16($nptr),%rax,%r15
2367 mov $bi,%rdx
2368 lea 4*16($nptr),$nptr
2369 mov %r12,-3*8($tptr)
2370 adcx %rax,%r13
2371 adox $zero,%r15
2372 mov %r13,-2*8($tptr)
2373
2374 dec $bptr # of=0, pass cf
2375 jnz .Lmulx4x_inner
2376
2377 mov 0+8(%rsp),$num # load -num
2378 movq %xmm0,%rdx # bp[i+1]
2379 adc $zero,%r15 # modulo-scheduled
2380 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2381 mov 8+8(%rsp),$bptr # re-load &b[i]
2382 mov 16+8(%rsp),%r10
2383 adc %r15,%r14
2384 lea ($aptr,$num),$aptr # rewind $aptr
2385 adc $zero,$zero # top-most carry
2386 mov %r14,-1*8($tptr)
2387
2388 cmp %r10,$bptr
2389 jb .Lmulx4x_outer
2390
2391 mov -16($nptr),%r10
2392 xor %r15,%r15
2393 sub %r14,%r10 # compare top-most words
2394 adc %r15,%r15
2395 or %r15,$zero
2396 xor \$1,$zero
2397 lea ($tptr,$num),%rdi # rewind $tptr
2398 lea ($nptr,$num,2),$nptr # rewind $nptr
2399 .byte 0x67,0x67
2400 sar \$3+2,$num # cf=0
2401 lea ($nptr,$zero,8),%rbp
2402 mov 56+8(%rsp),%rdx # restore rp
2403 mov $num,%rcx
2404 jmp .Lsqrx4x_sub # common post-condition
2405.size mulx4x_internal,.-mulx4x_internal
2406___
2407} {
2408######################################################################
2409# void bn_powerx5(
2410my $rptr="%rdi"; # BN_ULONG *rptr,
2411my $aptr="%rsi"; # const BN_ULONG *aptr,
2412my $bptr="%rdx"; # const void *table,
2413my $nptr="%rcx"; # const BN_ULONG *nptr,
2414my $n0 ="%r8"; # const BN_ULONG *n0);
2415my $num ="%r9"; # int num, has to be divisible by 8
2416 # int pwr);
2417
2418my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2419my @A0=("%r10","%r11");
2420my @A1=("%r12","%r13");
2421my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2422
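# Roughly, the routine below amounts to the following C-style sketch.  The
# helpers mont_sqr/mont_mul/gather5 are hypothetical and only name the calls
# to __bn_sqrx8x_internal, the constant-time table read and mulx4x_internal;
# all values stay in the Montgomery domain:
#
#	r = ap;					/* num words */
#	for (i = 0; i < 5; i++)
#		r = mont_sqr(r, np, n0);	/* five back-to-back squarings */
#	b = gather5(table, pwr);		/* cache-neutral table read */
#	rp = mont_mul(r, b, np, n0);		/* single multiplication */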
2423$code.=<<___;
2424.type bn_powerx5,\@function,6
2425.align 32
2426bn_powerx5:
2427.Lpowerx5_enter:
2428 .byte 0x67
2429 mov %rsp,%rax
2430 push %rbx
2431 push %rbp
2432 push %r12
2433 push %r13
2434 push %r14
2435 push %r15
2436___
2437$code.=<<___ if ($win64);
2438 lea -0x28(%rsp),%rsp
2439 movaps %xmm6,(%rsp)
2440 movaps %xmm7,0x10(%rsp)
2441___
2442$code.=<<___;
2443 .byte 0x67
2444 mov ${num}d,%r10d
2445 shl \$3,${num}d # convert $num to bytes
2446 shl \$3+2,%r10d # 4*$num
2447 neg $num
2448 mov ($n0),$n0 # *n0
2449
2450 ##############################################################
2451 # ensure that stack frame doesn't alias with $aptr+4*$num
2452 # modulo 4096, which covers ret[num], am[num] and n[2*num]
2453 # (see bn_exp.c). This is done to allow the memory disambiguation
2454 # logic to do its magic.
2455 #
2456 lea -64(%rsp,$num,2),%r11
2457 sub $aptr,%r11
2458 and \$4095,%r11
2459 cmp %r11,%r10
2460 jb .Lpwrx_sp_alt
2461 sub %r11,%rsp # align with $aptr
2462 lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
2463 jmp .Lpwrx_sp_done
2464
2465.align 32
2466.Lpwrx_sp_alt:
2467 lea 4096-64(,$num,2),%r10 # 4096-frame-2*$num
2468 lea -64(%rsp,$num,2),%rsp # alloca(frame+2*$num)
2469 sub %r10,%r11
2470 mov \$0,%r10
2471 cmovc %r10,%r11
2472 sub %r11,%rsp
2473.Lpwrx_sp_done:
2474 and \$-64,%rsp
2475 mov $num,%r10
2476 neg $num
2477
2478 ##############################################################
2479 # Stack layout
2480 #
2481 # +0 saved $num, used in reduction section
2482 # +8 &t[2*$num], used in reduction section
2483 # +16 intermediate carry bit
2484 # +24 top-most carry bit, used in reduction section
2485 # +32 saved *n0
2486 # +40 saved %rsp
2487 # +48 t[2*$num]
2488 #
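 # As an illustrative C overlay of the same frame (hypothetical, for
 # reference only; offsets match the table above):
 #
 #	struct frame {
 #		uint64_t saved_num;	/*  +0			*/
 #		uint64_t t_end;		/*  +8	end of t[]	*/
 #		uint64_t carry;		/* +16			*/
 #		uint64_t top_carry;	/* +24			*/
 #		uint64_t n0;		/* +32	saved *n0	*/
 #		uint64_t saved_rsp;	/* +40			*/
 #		uint64_t t[];		/* +48	2*num words	*/
 #	};
 #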
2489 pxor %xmm0,%xmm0
2490 movq $rptr,%xmm1 # save $rptr
2491 movq $nptr,%xmm2 # save $nptr
2492 movq %r10, %xmm3 # -$num
2493 movq $bptr,%xmm4
2494 mov $n0, 32(%rsp)
2495 mov %rax, 40(%rsp) # save original %rsp
2496.Lpowerx5_body:
2497
2498 call __bn_sqrx8x_internal
2499 call __bn_sqrx8x_internal
2500 call __bn_sqrx8x_internal
2501 call __bn_sqrx8x_internal
2502 call __bn_sqrx8x_internal
2503
2504 mov %r10,$num # -num
2505 mov $aptr,$rptr
2506 movq %xmm2,$nptr
2507 movq %xmm4,$bptr
2508 mov 40(%rsp),%rax
2509
2510 call mulx4x_internal
2511
2512 mov 40(%rsp),%rsi # restore %rsp
2513 mov \$1,%rax
2514___
2515$code.=<<___ if ($win64);
2516 movaps -88(%rsi),%xmm6
2517 movaps -72(%rsi),%xmm7
2518___
2519$code.=<<___;
2520 mov -48(%rsi),%r15
2521 mov -40(%rsi),%r14
2522 mov -32(%rsi),%r13
2523 mov -24(%rsi),%r12
2524 mov -16(%rsi),%rbp
2525 mov -8(%rsi),%rbx
2526 lea (%rsi),%rsp
2527.Lpowerx5_epilogue:
2528 ret
2529.size bn_powerx5,.-bn_powerx5
2530
2531.globl bn_sqrx8x_internal
2532.hidden bn_sqrx8x_internal
2533.type bn_sqrx8x_internal,\@abi-omnipotent
2534.align 32
2535bn_sqrx8x_internal:
2536__bn_sqrx8x_internal:
2537 ##################################################################
2538 # Squaring part:
2539 #
2540 # a) multiply-n-add everything but a[i]*a[i];
2541 # b) shift result of a) by 1 to the left and accumulate
2542 # a[i]*a[i] products;
2543 #
2544 ##################################################################
2545 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2546 # a[1]a[0]
2547 # a[2]a[0]
2548 # a[3]a[0]
2549 # a[2]a[1]
2550 # a[3]a[1]
2551 # a[3]a[2]
2552 #
2553 # a[4]a[0]
2554 # a[5]a[0]
2555 # a[6]a[0]
2556 # a[7]a[0]
2557 # a[4]a[1]
2558 # a[5]a[1]
2559 # a[6]a[1]
2560 # a[7]a[1]
2561 # a[4]a[2]
2562 # a[5]a[2]
2563 # a[6]a[2]
2564 # a[7]a[2]
2565 # a[4]a[3]
2566 # a[5]a[3]
2567 # a[6]a[3]
2568 # a[7]a[3]
2569 #
2570 # a[5]a[4]
2571 # a[6]a[4]
2572 # a[7]a[4]
2573 # a[6]a[5]
2574 # a[7]a[5]
2575 # a[7]a[6]
2576 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
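 #
 # The same two-pass idea in illustrative C (sketch only; every product is
 # double-width and carry propagation is omitted for brevity):
 #
 #	for (i = 0; i < n; i++)			/* off-diagonal products */
 #		for (j = i + 1; j < n; j++)
 #			t[i+j] += a[i] * a[j];
 #	t <<= 1;				/* double the cross terms */
 #	for (i = 0; i < n; i++)
 #		t[2*i] += a[i] * a[i];		/* add the diagonal */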
2577___
2578{
2579my ($zero,$carry)=("%rbp","%rcx");
2580my $aaptr=$zero;
2581$code.=<<___;
2582 lea 48+8(%rsp),$tptr
2583 lea ($aptr,$num),$aaptr
2584 mov $num,0+8(%rsp) # save $num
2585 mov $aaptr,8+8(%rsp) # save end of $aptr
2586 jmp .Lsqr8x_zero_start
2587
2588.align 32
2589.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2590.Lsqrx8x_zero:
2591 .byte 0x3e
2592 movdqa %xmm0,0*8($tptr)
2593 movdqa %xmm0,2*8($tptr)
2594 movdqa %xmm0,4*8($tptr)
2595 movdqa %xmm0,6*8($tptr)
2596.Lsqr8x_zero_start: # aligned at 32
2597 movdqa %xmm0,8*8($tptr)
2598 movdqa %xmm0,10*8($tptr)
2599 movdqa %xmm0,12*8($tptr)
2600 movdqa %xmm0,14*8($tptr)
2601 lea 16*8($tptr),$tptr
2602 sub \$64,$num
2603 jnz .Lsqrx8x_zero
2604
2605 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2606 #xor %r9,%r9 # t[1], ex-$num, zero already
2607 xor %r10,%r10
2608 xor %r11,%r11
2609 xor %r12,%r12
2610 xor %r13,%r13
2611 xor %r14,%r14
2612 xor %r15,%r15
2613 lea 48+8(%rsp),$tptr
2614 xor $zero,$zero # cf=0, of=0
2615 jmp .Lsqrx8x_outer_loop
2616
2617.align 32
2618.Lsqrx8x_outer_loop:
2619 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2620 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2621 adox %rax,%r10
2622 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2623 adcx %r10,%r9
2624 adox %rax,%r11
2625 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2626 adcx %r11,%r10
2627 adox %rax,%r12
2628 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2629 adcx %r12,%r11
2630 adox %rax,%r13
2631 mulx 5*8($aptr),%r12,%rax
2632 adcx %r13,%r12
2633 adox %rax,%r14
2634 mulx 6*8($aptr),%r13,%rax
2635 adcx %r14,%r13
2636 adox %r15,%rax
2637 mulx 7*8($aptr),%r14,%r15
2638 mov 1*8($aptr),%rdx # a[1]
2639 adcx %rax,%r14
2640 adox $zero,%r15
2641 adc 8*8($tptr),%r15
2642 mov %r8,1*8($tptr) # t[1]
2643 mov %r9,2*8($tptr) # t[2]
2644 sbb $carry,$carry # mov %cf,$carry
2645 xor $zero,$zero # cf=0, of=0
2646
2647
2648 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2649 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2650 adcx %r10,%r8
2651 adox %rbx,%r9
2652 mulx 4*8($aptr),%r10,%rbx # ...
2653 adcx %r11,%r9
2654 adox %rax,%r10
2655 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2656 adcx %r12,%r10
2657 adox %rbx,%r11
2658 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2659 adcx %r13,%r11
2660 adox %r14,%r12
2661 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2662 mov 2*8($aptr),%rdx # a[2]
2663 adcx %rax,%r12
2664 adox %rbx,%r13
2665 adcx %r15,%r13
2666 adox $zero,%r14 # of=0
2667 adcx $zero,%r14 # cf=0
2668
2669 mov %r8,3*8($tptr) # t[3]
2670 mov %r9,4*8($tptr) # t[4]
2671
2672 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2673 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2674 adcx %r10,%r8
2675 adox %rbx,%r9
2676 mulx 5*8($aptr),%r10,%rbx # ...
2677 adcx %r11,%r9
2678 adox %rax,%r10
2679 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2680 adcx %r12,%r10
2681 adox %r13,%r11
2682 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2683 .byte 0x3e
2684 mov 3*8($aptr),%rdx # a[3]
2685 adcx %rbx,%r11
2686 adox %rax,%r12
2687 adcx %r14,%r12
2688 mov %r8,5*8($tptr) # t[5]
2689 mov %r9,6*8($tptr) # t[6]
2690 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2691 adox $zero,%r13 # of=0
2692 adcx $zero,%r13 # cf=0
2693
2694 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2695 adcx %r10,%r8
2696 adox %rax,%r9
2697 mulx 6*8($aptr),%r10,%rax # ...
2698 adcx %r11,%r9
2699 adox %r12,%r10
2700 mulx 7*8($aptr),%r11,%r12
2701 mov 4*8($aptr),%rdx # a[4]
2702 mov 5*8($aptr),%r14 # a[5]
2703 adcx %rbx,%r10
2704 adox %rax,%r11
2705 mov 6*8($aptr),%r15 # a[6]
2706 adcx %r13,%r11
2707 adox $zero,%r12 # of=0
2708 adcx $zero,%r12 # cf=0
2709
2710 mov %r8,7*8($tptr) # t[7]
2711 mov %r9,8*8($tptr) # t[8]
2712
2713 mulx %r14,%r9,%rax # a[5]*a[4]
2714 mov 7*8($aptr),%r8 # a[7]
2715 adcx %r10,%r9
2716 mulx %r15,%r10,%rbx # a[6]*a[4]
2717 adox %rax,%r10
2718 adcx %r11,%r10
2719 mulx %r8,%r11,%rax # a[7]*a[4]
2720 mov %r14,%rdx # a[5]
2721 adox %rbx,%r11
2722 adcx %r12,%r11
2723 #adox $zero,%rax # of=0
2724 adcx $zero,%rax # cf=0
2725
2726 mulx %r15,%r14,%rbx # a[6]*a[5]
2727 mulx %r8,%r12,%r13 # a[7]*a[5]
2728 mov %r15,%rdx # a[6]
2729 lea 8*8($aptr),$aptr
2730 adcx %r14,%r11
2731 adox %rbx,%r12
2732 adcx %rax,%r12
2733 adox $zero,%r13
2734
2735 .byte 0x67,0x67
2736 mulx %r8,%r8,%r14 # a[7]*a[6]
2737 adcx %r8,%r13
2738 adcx $zero,%r14
2739
2740 cmp 8+8(%rsp),$aptr
2741 je .Lsqrx8x_outer_break
2742
2743 neg $carry # mov $carry,%cf
2744 mov \$-8,%rcx
2745 mov $zero,%r15
2746 mov 8*8($tptr),%r8
2747 adcx 9*8($tptr),%r9 # +=t[9]
2748 adcx 10*8($tptr),%r10 # ...
2749 adcx 11*8($tptr),%r11
2750 adc 12*8($tptr),%r12
2751 adc 13*8($tptr),%r13
2752 adc 14*8($tptr),%r14
2753 adc 15*8($tptr),%r15
2754 lea ($aptr),$aaptr
2755 lea 2*64($tptr),$tptr
2756 sbb %rax,%rax # mov %cf,$carry
2757
2758 mov -64($aptr),%rdx # a[0]
2759 mov %rax,16+8(%rsp) # offload $carry
2760 mov $tptr,24+8(%rsp)
2761
2762 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2763 xor %eax,%eax # cf=0, of=0
2764 jmp .Lsqrx8x_loop
2765
2766.align 32
2767.Lsqrx8x_loop:
2768 mov %r8,%rbx
2769 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2770 adcx %rax,%rbx # +=t[8]
2771 adox %r9,%r8
2772
2773 mulx 1*8($aaptr),%rax,%r9 # ...
2774 adcx %rax,%r8
2775 adox %r10,%r9
2776
2777 mulx 2*8($aaptr),%rax,%r10
2778 adcx %rax,%r9
2779 adox %r11,%r10
2780
2781 mulx 3*8($aaptr),%rax,%r11
2782 adcx %rax,%r10
2783 adox %r12,%r11
2784
2785 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2786 adcx %rax,%r11
2787 adox %r13,%r12
2788
2789 mulx 5*8($aaptr),%rax,%r13
2790 adcx %rax,%r12
2791 adox %r14,%r13
2792
2793 mulx 6*8($aaptr),%rax,%r14
2794 mov %rbx,($tptr,%rcx,8) # store t[8+i]
2795 mov \$0,%ebx
2796 adcx %rax,%r13
2797 adox %r15,%r14
2798
2799 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
2800 mov 8($aptr,%rcx,8),%rdx # a[i]
2801 adcx %rax,%r14
2802 adox %rbx,%r15 # %rbx is 0, of=0
2803 adcx %rbx,%r15 # cf=0
2804
2805 .byte 0x67
2806 inc %rcx # of=0
2807 jnz .Lsqrx8x_loop
2808
2809 lea 8*8($aaptr),$aaptr
2810 mov \$-8,%rcx
2811 cmp 8+8(%rsp),$aaptr # done?
2812 je .Lsqrx8x_break
2813
2814 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
2815 .byte 0x66
2816 mov -64($aptr),%rdx
2817 adcx 0*8($tptr),%r8
2818 adcx 1*8($tptr),%r9
2819 adc 2*8($tptr),%r10
2820 adc 3*8($tptr),%r11
2821 adc 4*8($tptr),%r12
2822 adc 5*8($tptr),%r13
2823 adc 6*8($tptr),%r14
2824 adc 7*8($tptr),%r15
2825 lea 8*8($tptr),$tptr
2826 .byte 0x67
2827 sbb %rax,%rax # mov %cf,%rax
2828 xor %ebx,%ebx # cf=0, of=0
2829 mov %rax,16+8(%rsp) # offload carry
2830 jmp .Lsqrx8x_loop
2831
2832.align 32
2833.Lsqrx8x_break:
2834 sub 16+8(%rsp),%r8 # consume last carry
2835 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
2836 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
2837 xor %ebp,%ebp # xor $zero,$zero
2838 mov %r8,0*8($tptr)
2839 cmp $carry,$tptr # cf=0, of=0
2840 je .Lsqrx8x_outer_loop
2841
2842 mov %r9,1*8($tptr)
2843 mov 1*8($carry),%r9
2844 mov %r10,2*8($tptr)
2845 mov 2*8($carry),%r10
2846 mov %r11,3*8($tptr)
2847 mov 3*8($carry),%r11
2848 mov %r12,4*8($tptr)
2849 mov 4*8($carry),%r12
2850 mov %r13,5*8($tptr)
2851 mov 5*8($carry),%r13
2852 mov %r14,6*8($tptr)
2853 mov 6*8($carry),%r14
2854 mov %r15,7*8($tptr)
2855 mov 7*8($carry),%r15
2856 mov $carry,$tptr
2857 jmp .Lsqrx8x_outer_loop
2858
2859.align 32
2860.Lsqrx8x_outer_break:
2861 mov %r9,9*8($tptr) # t[9]
2862 movq %xmm3,%rcx # -$num
2863 mov %r10,10*8($tptr) # ...
2864 mov %r11,11*8($tptr)
2865 mov %r12,12*8($tptr)
2866 mov %r13,13*8($tptr)
2867 mov %r14,14*8($tptr)
2868___
2869} {
2870my $i="%rcx";
2871$code.=<<___;
2872 lea 48+8(%rsp),$tptr
2873 mov ($aptr,$i),%rdx # a[0]
2874
2875 mov 8($tptr),$A0[1] # t[1]
2876 xor $A0[0],$A0[0] # t[0], of=0, cf=0
2877 mov 0+8(%rsp),$num # restore $num
2878 adox $A0[1],$A0[1]
2879 mov 16($tptr),$A1[0] # t[2] # prefetch
2880 mov 24($tptr),$A1[1] # t[3] # prefetch
2881 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
2882
2883.align 32
2884.Lsqrx4x_shift_n_add:
2885 mulx %rdx,%rax,%rbx
2886 adox $A1[0],$A1[0]
2887 adcx $A0[0],%rax
2888 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
2889 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
2890 adox $A1[1],$A1[1]
2891 adcx $A0[1],%rbx
2892 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
2893 mov %rax,0($tptr)
2894 mov %rbx,8($tptr)
2895
2896 mulx %rdx,%rax,%rbx
2897 adox $A0[0],$A0[0]
2898 adcx $A1[0],%rax
2899 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
2900 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
2901 adox $A0[1],$A0[1]
2902 adcx $A1[1],%rbx
2903 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
2904 mov %rax,16($tptr)
2905 mov %rbx,24($tptr)
2906
2907 mulx %rdx,%rax,%rbx
2908 adox $A1[0],$A1[0]
2909 adcx $A0[0],%rax
2910 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
2911 lea 32($i),$i
2912 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
2913 adox $A1[1],$A1[1]
2914 adcx $A0[1],%rbx
2915 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
2916 mov %rax,32($tptr)
2917 mov %rbx,40($tptr)
2918
2919 mulx %rdx,%rax,%rbx
2920 adox $A0[0],$A0[0]
2921 adcx $A1[0],%rax
2922 jrcxz .Lsqrx4x_shift_n_add_break
2923 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
2924 adox $A0[1],$A0[1]
2925 adcx $A1[1],%rbx
2926 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
2927 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
2928 mov %rax,48($tptr)
2929 mov %rbx,56($tptr)
2930 lea 64($tptr),$tptr
2931 nop
2932 jmp .Lsqrx4x_shift_n_add
2933
2934.align 32
2935.Lsqrx4x_shift_n_add_break:
2936 adcx $A1[1],%rbx
2937 mov %rax,48($tptr)
2938 mov %rbx,56($tptr)
2939 lea 64($tptr),$tptr # end of t[] buffer
2940___
2941}
2942######################################################################
2943# Montgomery reduction part, "word-by-word" algorithm.
2944#
2945# This new path is inspired by multiple submissions from Intel, by
2946# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
2947# Vinodh Gopal...
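#
# One reduction pass in illustrative C (hypothetical sketch; t[] holds 2*num
# words, n0 = -n[0]^-1 mod 2^64, every step is carry-propagated):
#
#	for (i = 0; i < num; i++) {
#		m = t[i] * n0;			/* mod 2^64	*/
#		t[i..i+num] += m * n;		/* zeroes t[i]	*/
#	}
#	/* the upper num words of t[], plus the top-most carry, form the */
#	/* result, still subject to the final conditional subtraction.   */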
2948{
2949my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
2950
2951$code.=<<___;
2952 movq %xmm2,$nptr
2953sqrx8x_reduction:
2954 xor %eax,%eax # initial top-most carry bit
2955 mov 32+8(%rsp),%rbx # n0
2956 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
2957 lea -128($nptr,$num,2),%rcx # end of n[]
2958 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
2959 mov %rcx, 0+8(%rsp) # save end of n[]
2960 mov $tptr,8+8(%rsp) # save end of t[]
2961
2962 lea 48+8(%rsp),$tptr # initial t[] window
2963 jmp .Lsqrx8x_reduction_loop
2964
2965.align 32
2966.Lsqrx8x_reduction_loop:
2967 mov 8*1($tptr),%r9
2968 mov 8*2($tptr),%r10
2969 mov 8*3($tptr),%r11
2970 mov 8*4($tptr),%r12
2971 mov %rdx,%r8
2972 imulq %rbx,%rdx # n0*a[i]
2973 mov 8*5($tptr),%r13
2974 mov 8*6($tptr),%r14
2975 mov 8*7($tptr),%r15
2976 mov %rax,24+8(%rsp) # store top-most carry bit
2977
2978 lea 8*8($tptr),$tptr
2979 xor $carry,$carry # cf=0,of=0
2980 mov \$-8,%rcx
2981 jmp .Lsqrx8x_reduce
2982
2983.align 32
2984.Lsqrx8x_reduce:
2985 mov %r8, %rbx
2986 mulx 16*0($nptr),%rax,%r8 # n[0]
2987 adcx %rbx,%rax # discarded
2988 adox %r9,%r8
2989
2990 mulx 16*1($nptr),%rbx,%r9 # n[1]
2991 adcx %rbx,%r8
2992 adox %r10,%r9
2993
2994 mulx 16*2($nptr),%rbx,%r10
2995 adcx %rbx,%r9
2996 adox %r11,%r10
2997
2998 mulx 16*3($nptr),%rbx,%r11
2999 adcx %rbx,%r10
3000 adox %r12,%r11
3001
3002 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x40,0x00,0x00,0x00 # mulx 16*4($nptr),%rbx,%r12
3003 mov %rdx,%rax
3004 mov %r8,%rdx
3005 adcx %rbx,%r11
3006 adox %r13,%r12
3007
3008 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3009 mov %rax,%rdx
3010 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3011
3012 mulx 16*5($nptr),%rax,%r13
3013 adcx %rax,%r12
3014 adox %r14,%r13
3015
3016 mulx 16*6($nptr),%rax,%r14
3017 adcx %rax,%r13
3018 adox %r15,%r14
3019
3020 mulx 16*7($nptr),%rax,%r15
3021 mov %rbx,%rdx
3022 adcx %rax,%r14
3023 adox $carry,%r15 # $carry is 0
3024 adcx $carry,%r15 # cf=0
3025
3026 .byte 0x67,0x67,0x67
3027 inc %rcx # of=0
3028 jnz .Lsqrx8x_reduce
3029
3030 mov $carry,%rax # xor %rax,%rax
3031 cmp 0+8(%rsp),$nptr # end of n[]?
3032 jae .Lsqrx8x_no_tail
3033
3034 mov 48+8(%rsp),%rdx # pull n0*a[0]
3035 add 8*0($tptr),%r8
3036 lea 16*8($nptr),$nptr
3037 mov \$-8,%rcx
3038 adcx 8*1($tptr),%r9
3039 adcx 8*2($tptr),%r10
3040 adc 8*3($tptr),%r11
3041 adc 8*4($tptr),%r12
3042 adc 8*5($tptr),%r13
3043 adc 8*6($tptr),%r14
3044 adc 8*7($tptr),%r15
3045 lea 8*8($tptr),$tptr
3046 sbb %rax,%rax # top carry
3047
3048 xor $carry,$carry # of=0, cf=0
3049 mov %rax,16+8(%rsp)
3050 jmp .Lsqrx8x_tail
3051
3052.align 32
3053.Lsqrx8x_tail:
3054 mov %r8,%rbx
3055 mulx 16*0($nptr),%rax,%r8
3056 adcx %rax,%rbx
3057 adox %r9,%r8
3058
3059 mulx 16*1($nptr),%rax,%r9
3060 adcx %rax,%r8
3061 adox %r10,%r9
3062
3063 mulx 16*2($nptr),%rax,%r10
3064 adcx %rax,%r9
3065 adox %r11,%r10
3066
3067 mulx 16*3($nptr),%rax,%r11
3068 adcx %rax,%r10
3069 adox %r12,%r11
3070
3071 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x40,0x00,0x00,0x00 # mulx 16*4($nptr),%rax,%r12
3072 adcx %rax,%r11
3073 adox %r13,%r12
3074
3075 mulx 16*5($nptr),%rax,%r13
3076 adcx %rax,%r12
3077 adox %r14,%r13
3078
3079 mulx 16*6($nptr),%rax,%r14
3080 adcx %rax,%r13
3081 adox %r15,%r14
3082
3083 mulx 16*7($nptr),%rax,%r15
3084 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3085 adcx %rax,%r14
3086 adox $carry,%r15
3087 mov %rbx,($tptr,%rcx,8) # save result
3088 mov %r8,%rbx
3089 adcx $carry,%r15 # cf=0
3090
3091 inc %rcx # of=0
3092 jnz .Lsqrx8x_tail
3093
3094 cmp 0+8(%rsp),$nptr # end of n[]?
3095 jae .Lsqrx8x_tail_done # break out of loop
3096
3097 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3098 mov 48+8(%rsp),%rdx # pull n0*a[0]
3099 lea 16*8($nptr),$nptr
3100 adc 8*0($tptr),%r8
3101 adc 8*1($tptr),%r9
3102 adc 8*2($tptr),%r10
3103 adc 8*3($tptr),%r11
3104 adc 8*4($tptr),%r12
3105 adc 8*5($tptr),%r13
3106 adc 8*6($tptr),%r14
3107 adc 8*7($tptr),%r15
3108 lea 8*8($tptr),$tptr
3109 sbb %rax,%rax
3110 sub \$8,%rcx # mov \$-8,%rcx
3111
3112 xor $carry,$carry # of=0, cf=0
3113 mov %rax,16+8(%rsp)
3114 jmp .Lsqrx8x_tail
3115
3116.align 32
3117.Lsqrx8x_tail_done:
3118 add 24+8(%rsp),%r8 # can this overflow?
3119 mov $carry,%rax # xor %rax,%rax
3120
3121 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3122.Lsqrx8x_no_tail: # %cf is 0 if jumped here
3123 adc 8*0($tptr),%r8
3124 movq %xmm3,%rcx
3125 adc 8*1($tptr),%r9
3126 mov 16*7($nptr),$carry
3127 movq %xmm2,$nptr # restore $nptr
3128 adc 8*2($tptr),%r10
3129 adc 8*3($tptr),%r11
3130 adc 8*4($tptr),%r12
3131 adc 8*5($tptr),%r13
3132 adc 8*6($tptr),%r14
3133 adc 8*7($tptr),%r15
3134 adc %rax,%rax # top-most carry
3135
3136 mov 32+8(%rsp),%rbx # n0
3137 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3138
3139 mov %r8,8*0($tptr) # store top 512 bits
3140 lea 8*8($tptr),%r8 # borrow %r8
3141 mov %r9,8*1($tptr)
3142 mov %r10,8*2($tptr)
3143 mov %r11,8*3($tptr)
3144 mov %r12,8*4($tptr)
3145 mov %r13,8*5($tptr)
3146 mov %r14,8*6($tptr)
3147 mov %r15,8*7($tptr)
3148
3149 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3150 cmp 8+8(%rsp),%r8 # end of t[]?
3151 jb .Lsqrx8x_reduction_loop
3152___
3153}
3154##############################################################
3155# Post-condition, 4x unrolled
3156#
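# In illustrative C (hypothetical sketch): decide once, from the top-most
# carry and the top words, whether n must be subtracted, then subtract
# branchlessly by pointing either at n[] or at the zero words interleaved
# with it (n[0],0,n[1],0,...):
#
#	need_sub = top_carry | (t_top > n_top);		/* 0 or 1 */
#	base	 = need_sub ? n_interleaved : n_interleaved + 1;
#	for (borrow = 0, i = 0; i < num; i++)
#		rp[i] = subb(t[i], base[2*i], &borrow);	/* 4x unrolled below */
#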
3157{
3158my ($rptr,$nptr)=("%rdx","%rbp");
3159my @ri=map("%r$_",(10..13));
3160my @ni=map("%r$_",(14..15));
3161$code.=<<___;
3162 xor %rbx,%rbx
3163 sub %r15,%rsi # compare top-most words
3164 adc %rbx,%rbx
3165 mov %rcx,%r10 # -$num
3166 .byte 0x67
3167 or %rbx,%rax
3168 .byte 0x67
3169 mov %rcx,%r9 # -$num
3170 xor \$1,%rax
3171 sar \$3+2,%rcx # cf=0
3172 #lea 48+8(%rsp,%r9),$tptr
3173 lea ($nptr,%rax,8),$nptr
3174 movq %xmm1,$rptr # restore $rptr
3175 movq %xmm1,$aptr # prepare for back-to-back call
3176 jmp .Lsqrx4x_sub
3177
3178.align 32
3179.Lsqrx4x_sub:
3180 .byte 0x66
3181 mov 8*0($tptr),%r12
3182 mov 8*1($tptr),%r13
3183 sbb 16*0($nptr),%r12
3184 mov 8*2($tptr),%r14
3185 sbb 16*1($nptr),%r13
3186 mov 8*3($tptr),%r15
3187 lea 8*4($tptr),$tptr
3188 sbb 16*2($nptr),%r14
3189 mov %r12,8*0($rptr)
3190 sbb 16*3($nptr),%r15
3191 lea 16*4($nptr),$nptr
3192 mov %r13,8*1($rptr)
3193 mov %r14,8*2($rptr)
3194 mov %r15,8*3($rptr)
3195 lea 8*4($rptr),$rptr
3196
3197 inc %rcx
3198 jnz .Lsqrx4x_sub
3199___
3200}
3201$code.=<<___;
3202 neg %r9 # restore $num
3203
3204 ret
3205.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
3206___
3207}}}
3208{
3209my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3210 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3211my $out=$inp;
3212my $STRIDE=2**5*8;
3213my $N=$STRIDE/4;
3214
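# Table layout shared by the two routines below, as an illustrative C sketch
# (derived from the 32*8-byte stride; loop bodies are hypothetical):
#
#	/* scatter: spread one num-word value across the 32-entry table */
#	for (i = 0; i < num; i++) tbl[32*i + idx] = inp[i];
#
#	/* gather: read it back without an idx-dependent access pattern;   */
#	/* each 32-word group is touched once per cache line and the right */
#	/* word is selected with the .Lmagic_masks constants below.        */
#	for (i = 0; i < num; i++) out[i] = tbl[32*i + idx];
#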
3215$code.=<<___;
3216.globl bn_scatter5
3217.type bn_scatter5,\@abi-omnipotent
3218.align 16
3219bn_scatter5:
3220 cmp \$0, $num
3221 jz .Lscatter_epilogue
3222 lea ($tbl,$idx,8),$tbl
3223.Lscatter:
3224 mov ($inp),%rax
3225 lea 8($inp),$inp
3226 mov %rax,($tbl)
3227 lea 32*8($tbl),$tbl
3228 sub \$1,$num
3229 jnz .Lscatter
3230.Lscatter_epilogue:
3231 ret
3232.size bn_scatter5,.-bn_scatter5
3233
3234.globl bn_gather5
3235.type bn_gather5,\@abi-omnipotent
3236.align 16
3237bn_gather5:
3238___
3239$code.=<<___ if ($win64);
3240.LSEH_begin_bn_gather5:
3241 # I can't trust assembler to use specific encoding:-(
3242 .byte 0x48,0x83,0xec,0x28 #sub \$0x28,%rsp
3243 .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
3244 .byte 0x0f,0x29,0x7c,0x24,0x10 #movaps %xmm7,0x10(%rsp)
3245___
3246$code.=<<___;
3247 mov $idx,%r11d
3248 shr \$`log($N/8)/log(2)`,$idx
3249 and \$`$N/8-1`,%r11
3250 not $idx
3251 lea .Lmagic_masks(%rip),%rax
3252 and \$`2**5/($N/8)-1`,$idx # 5 is "window size"
3253 lea 128($tbl,%r11,8),$tbl # pointer within 1st cache line
3254 movq 0(%rax,$idx,8),%xmm4 # set of masks denoting which
3255 movq 8(%rax,$idx,8),%xmm5 # cache line contains element
3256 movq 16(%rax,$idx,8),%xmm6 # denoted by the idx argument
3257 movq 24(%rax,$idx,8),%xmm7
3258 jmp .Lgather
3259.align 16
3260.Lgather:
3261 movq `0*$STRIDE/4-128`($tbl),%xmm0
3262 movq `1*$STRIDE/4-128`($tbl),%xmm1
3263 pand %xmm4,%xmm0
3264 movq `2*$STRIDE/4-128`($tbl),%xmm2
3265 pand %xmm5,%xmm1
3266 movq `3*$STRIDE/4-128`($tbl),%xmm3
3267 pand %xmm6,%xmm2
3268 por %xmm1,%xmm0
3269 pand %xmm7,%xmm3
3270 .byte 0x67,0x67
3271 por %xmm2,%xmm0
3272 lea $STRIDE($tbl),$tbl
3273 por %xmm3,%xmm0
3274
3275 movq %xmm0,($out) # m0=bp[0]
3276 lea 8($out),$out
3277 sub \$1,$num
3278 jnz .Lgather
3279___
3280$code.=<<___ if ($win64);
3281 movaps (%rsp),%xmm6
3282 movaps 0x10(%rsp),%xmm7
3283 lea 0x28(%rsp),%rsp
3284___
3285$code.=<<___;
3286 ret
3287.LSEH_end_bn_gather5:
3288.size bn_gather5,.-bn_gather5
3289___
3290}
3291$code.=<<___;
3292.align 64
3293.Lmagic_masks:
3294 .long 0,0, 0,0, 0,0, -1,-1
3295 .long 0,0, 0,0, 0,0, 0,0
3296.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3297___
3298
3299# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3300# CONTEXT *context,DISPATCHER_CONTEXT *disp)
3301if ($win64) {
3302$rec="%rcx";
3303$frame="%rdx";
3304$context="%r8";
3305$disp="%r9";
3306
3307$code.=<<___;
3308.extern __imp_RtlVirtualUnwind
3309.type mul_handler,\@abi-omnipotent
3310.align 16
3311mul_handler:
3312 push %rsi
3313 push %rdi
3314 push %rbx
3315 push %rbp
3316 push %r12
3317 push %r13
3318 push %r14
3319 push %r15
3320 pushfq
3321 sub \$64,%rsp
3322
3323 mov 120($context),%rax # pull context->Rax
3324 mov 248($context),%rbx # pull context->Rip
3325
3326 mov 8($disp),%rsi # disp->ImageBase
3327 mov 56($disp),%r11 # disp->HandlerData
3328
3329 mov 0(%r11),%r10d # HandlerData[0]
3330 lea (%rsi,%r10),%r10 # end of prologue label
3331 cmp %r10,%rbx # context->Rip<end of prologue label
3332 jb .Lcommon_seh_tail
3333
3334 mov 152($context),%rax # pull context->Rsp
3335
3336 mov 4(%r11),%r10d # HandlerData[1]
3337 lea (%rsi,%r10),%r10 # epilogue label
3338 cmp %r10,%rbx # context->Rip>=epilogue label
3339 jae .Lcommon_seh_tail
3340
3341 lea .Lmul_epilogue(%rip),%r10
3342 cmp %r10,%rbx
3343 jb .Lbody_40
3344
3345 mov 192($context),%r10 # pull $num
3346 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
3347 jmp .Lbody_proceed
3348
3349.Lbody_40:
3350 mov 40(%rax),%rax # pull saved stack pointer
3351.Lbody_proceed:
3352
3353 movaps -88(%rax),%xmm0
3354 movaps -72(%rax),%xmm1
3355
3356 mov -8(%rax),%rbx
3357 mov -16(%rax),%rbp
3358 mov -24(%rax),%r12
3359 mov -32(%rax),%r13
3360 mov -40(%rax),%r14
3361 mov -48(%rax),%r15
3362 mov %rbx,144($context) # restore context->Rbx
3363 mov %rbp,160($context) # restore context->Rbp
3364 mov %r12,216($context) # restore context->R12
3365 mov %r13,224($context) # restore context->R13
3366 mov %r14,232($context) # restore context->R14
3367 mov %r15,240($context) # restore context->R15
3368 movups %xmm0,512($context) # restore context->Xmm6
3369 movups %xmm1,528($context) # restore context->Xmm7
3370
3371.Lcommon_seh_tail:
3372 mov 8(%rax),%rdi
3373 mov 16(%rax),%rsi
3374 mov %rax,152($context) # restore context->Rsp
3375 mov %rsi,168($context) # restore context->Rsi
3376 mov %rdi,176($context) # restore context->Rdi
3377
3378 mov 40($disp),%rdi # disp->ContextRecord
3379 mov $context,%rsi # context
3380 mov \$154,%ecx # sizeof(CONTEXT)
3381 .long 0xa548f3fc # cld; rep movsq
3382
3383 mov $disp,%rsi
3384 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3385 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3386 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3387 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3388 mov 40(%rsi),%r10 # disp->ContextRecord
3389 lea 56(%rsi),%r11 # &disp->HandlerData
3390 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3391 mov %r10,32(%rsp) # arg5
3392 mov %r11,40(%rsp) # arg6
3393 mov %r12,48(%rsp) # arg7
3394 mov %rcx,56(%rsp) # arg8, (NULL)
3395 call *__imp_RtlVirtualUnwind(%rip)
3396
3397 mov \$1,%eax # ExceptionContinueSearch
3398 add \$64,%rsp
3399 popfq
3400 pop %r15
3401 pop %r14
3402 pop %r13
3403 pop %r12
3404 pop %rbp
3405 pop %rbx
3406 pop %rdi
3407 pop %rsi
3408 ret
3409.size mul_handler,.-mul_handler
3410
3411.section .pdata
3412.align 4
3413 .rva .LSEH_begin_bn_mul_mont_gather5
3414 .rva .LSEH_end_bn_mul_mont_gather5
3415 .rva .LSEH_info_bn_mul_mont_gather5
3416
3417 .rva .LSEH_begin_bn_mul4x_mont_gather5
3418 .rva .LSEH_end_bn_mul4x_mont_gather5
3419 .rva .LSEH_info_bn_mul4x_mont_gather5
3420
3421 .rva .LSEH_begin_bn_power5
3422 .rva .LSEH_end_bn_power5
3423 .rva .LSEH_info_bn_power5
3424
3425 .rva .LSEH_begin_bn_from_mont8x
3426 .rva .LSEH_end_bn_from_mont8x
3427 .rva .LSEH_info_bn_from_mont8x
3428___
3429$code.=<<___ if ($addx);
3430 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3431 .rva .LSEH_end_bn_mulx4x_mont_gather5
3432 .rva .LSEH_info_bn_mulx4x_mont_gather5
3433
3434 .rva .LSEH_begin_bn_powerx5
3435 .rva .LSEH_end_bn_powerx5
3436 .rva .LSEH_info_bn_powerx5
3437___
3438$code.=<<___;
3439 .rva .LSEH_begin_bn_gather5
3440 .rva .LSEH_end_bn_gather5
3441 .rva .LSEH_info_bn_gather5
3442
3443.section .xdata
3444.align 8
3445.LSEH_info_bn_mul_mont_gather5:
3446 .byte 9,0,0,0
3447 .rva mul_handler
3448 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3449.align 8
3450.LSEH_info_bn_mul4x_mont_gather5:
3451 .byte 9,0,0,0
3452 .rva mul_handler
3453 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3454.align 8
3455.LSEH_info_bn_power5:
3456 .byte 9,0,0,0
3457 .rva mul_handler
3458 .rva .Lpower5_body,.Lpower5_epilogue # HandlerData[]
3459.align 8
3460.LSEH_info_bn_from_mont8x:
3461 .byte 9,0,0,0
3462 .rva mul_handler
3463 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3464___
3465$code.=<<___ if ($addx);
3466.align 8
3467.LSEH_info_bn_mulx4x_mont_gather5:
3468 .byte 9,0,0,0
3469 .rva mul_handler
3470 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3471.align 8
3472.LSEH_info_bn_powerx5:
3473 .byte 9,0,0,0
3474 .rva mul_handler
3475 .rva .Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3476___
3477$code.=<<___;
3478.align 8
3479.LSEH_info_bn_gather5:
3480 .byte 0x01,0x0d,0x05,0x00
3481 .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
3482 .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6
3483 .byte 0x04,0x42,0x00,0x00 #sub rsp,0x28
3484.align 8
3485___
3486}
3487
3488$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3489
3490print $code;
3491close STDOUT;