external/boringssl: Sync to c3889634a1aa52575c5d26497696238208fbd0f5.

This includes the following changes:

https://boringssl.googlesource.com/boringssl/+log/41c10e2b5f37edce8b9f292f7f3bacb7e30e25c4..c3889634a1aa52575c5d26497696238208fbd0f5
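
The bulk of the p256-x86_64-asm.pl diff below adds DWARF CFI annotations (.cfi_startproc/.cfi_endproc on the internal __ecp_nistz256_* and select_w5/w7 helpers, plus .cfi_adjust_cfa_offset around the %rsp adjustments) so unwinders can walk these routines, and widens the documented rep parameter of ecp_nistz256_ord_sqr_mont from int to uint64_t; p256_beeu-x86_64-asm.pl drops its frame-pointer-based CFA in favor of explicit offset adjustments. As an illustration only (example_leaf is a made-up symbol, not part of this change), the annotation pattern in GNU assembler syntax is roughly:

	.type	example_leaf,@function
	.align	32
example_leaf:
.cfi_startproc				# open an FDE so the unwinder has rules for this symbol
	sub	\$40, %rsp
.cfi_adjust_cfa_offset	40		# CFA is now 40 bytes further from %rsp
	# ... body ...
	add	\$40, %rsp
.cfi_adjust_cfa_offset	-40		# undo the adjustment before returning
	ret
.cfi_endproc
	.size	example_leaf,.-example_leaf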

Test: atest CtsLibcoreTestCases
Change-Id: Ia1c2941ccf58a9e0d736b3409a2d13c21603a205
diff --git a/src/crypto/fipsmodule/ec/asm/p256-x86_64-asm.pl b/src/crypto/fipsmodule/ec/asm/p256-x86_64-asm.pl
index 1dc257d..5402885 100755
--- a/src/crypto/fipsmodule/ec/asm/p256-x86_64-asm.pl
+++ b/src/crypto/fipsmodule/ec/asm/p256-x86_64-asm.pl
@@ -488,7 +488,7 @@
 # void ecp_nistz256_ord_sqr_mont(
 #   uint64_t res[4],
 #   uint64_t a[4],
-#   int rep);
+#   uint64_t rep);
 
 .globl	ecp_nistz256_ord_sqr_mont
 .type	ecp_nistz256_ord_sqr_mont,\@function,3
@@ -1320,6 +1320,7 @@
 .type	__ecp_nistz256_mul_montq,\@abi-omnipotent
 .align	32
 __ecp_nistz256_mul_montq:
+.cfi_startproc
 	########################################################################
 	# Multiply a by b[0]
 	mov	%rax, $t1
@@ -1531,6 +1532,7 @@
 	mov	$acc1, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
 
 ################################################################################
@@ -1615,6 +1617,7 @@
 .type	__ecp_nistz256_sqr_montq,\@abi-omnipotent
 .align	32
 __ecp_nistz256_sqr_montq:
+.cfi_startproc
 	mov	%rax, $acc5
 	mulq	$acc6			# a[1]*a[0]
 	mov	%rax, $acc1
@@ -1772,6 +1775,7 @@
 	mov	$acc7, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
 ___
 
@@ -1780,6 +1784,7 @@
 .type	__ecp_nistz256_mul_montx,\@abi-omnipotent
 .align	32
 __ecp_nistz256_mul_montx:
+.cfi_startproc
 	########################################################################
 	# Multiply by b[0]
 	mulx	$acc1, $acc0, $acc1
@@ -1942,11 +1947,13 @@
 	mov	$acc1, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
 
 .type	__ecp_nistz256_sqr_montx,\@abi-omnipotent
 .align	32
 __ecp_nistz256_sqr_montx:
+.cfi_startproc
 	mulx	$acc6, $acc1, $acc2	# a[0]*a[1]
 	mulx	$acc7, $t0, $acc3	# a[0]*a[2]
 	xor	%eax, %eax
@@ -2070,6 +2077,7 @@
 	mov	$acc7, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
 ___
 }
@@ -2087,6 +2095,7 @@
 .type	ecp_nistz256_select_w5,\@abi-omnipotent
 .align	32
 ecp_nistz256_select_w5:
+.cfi_startproc
 ___
 $code.=<<___	if ($avx>1);
 	leaq	OPENSSL_ia32cap_P(%rip), %rax
@@ -2176,6 +2185,7 @@
 ___
 $code.=<<___;
 	ret
+.cfi_endproc
 .LSEH_end_ecp_nistz256_select_w5:
 .size	ecp_nistz256_select_w5,.-ecp_nistz256_select_w5
 
@@ -2185,6 +2195,7 @@
 .type	ecp_nistz256_select_w7,\@abi-omnipotent
 .align	32
 ecp_nistz256_select_w7:
+.cfi_startproc
 ___
 $code.=<<___	if ($avx>1);
 	leaq	OPENSSL_ia32cap_P(%rip), %rax
@@ -2263,6 +2274,7 @@
 ___
 $code.=<<___;
 	ret
+.cfi_endproc
 .LSEH_end_ecp_nistz256_select_w7:
 .size	ecp_nistz256_select_w7,.-ecp_nistz256_select_w7
 ___
@@ -2279,6 +2291,7 @@
 .type	ecp_nistz256_avx2_select_w5,\@abi-omnipotent
 .align	32
 ecp_nistz256_avx2_select_w5:
+.cfi_startproc
 .Lavx2_select_w5:
 	vzeroupper
 ___
@@ -2366,6 +2379,7 @@
 ___
 $code.=<<___;
 	ret
+.cfi_endproc
 .LSEH_end_ecp_nistz256_avx2_select_w5:
 .size	ecp_nistz256_avx2_select_w5,.-ecp_nistz256_avx2_select_w5
 ___
@@ -2385,6 +2399,7 @@
 .type	ecp_nistz256_avx2_select_w7,\@abi-omnipotent
 .align	32
 ecp_nistz256_avx2_select_w7:
+.cfi_startproc
 .Lavx2_select_w7:
 	vzeroupper
 ___
@@ -2487,6 +2502,7 @@
 ___
 $code.=<<___;
 	ret
+.cfi_endproc
 .LSEH_end_ecp_nistz256_avx2_select_w7:
 .size	ecp_nistz256_avx2_select_w7,.-ecp_nistz256_avx2_select_w7
 ___
@@ -2556,6 +2572,7 @@
 .type	__ecp_nistz256_add_toq,\@abi-omnipotent
 .align	32
 __ecp_nistz256_add_toq:
+.cfi_startproc
 	xor	$t4,$t4
 	add	8*0($b_ptr), $a0
 	adc	8*1($b_ptr), $a1
@@ -2583,11 +2600,13 @@
 	mov	$a3, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
 
 .type	__ecp_nistz256_sub_fromq,\@abi-omnipotent
 .align	32
 __ecp_nistz256_sub_fromq:
+.cfi_startproc
 	sub	8*0($b_ptr), $a0
 	sbb	8*1($b_ptr), $a1
 	 mov	$a0, $t0
@@ -2614,11 +2633,13 @@
 	mov	$a3, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
 
 .type	__ecp_nistz256_subq,\@abi-omnipotent
 .align	32
 __ecp_nistz256_subq:
+.cfi_startproc
 	sub	$a0, $t0
 	sbb	$a1, $t1
 	 mov	$t0, $a0
@@ -2641,11 +2662,13 @@
 	cmovnz	$t3, $a3
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_subq,.-__ecp_nistz256_subq
 
 .type	__ecp_nistz256_mul_by_2q,\@abi-omnipotent
 .align	32
 __ecp_nistz256_mul_by_2q:
+.cfi_startproc
 	xor	$t4, $t4
 	add	$a0, $a0		# a0:a3+a0:a3
 	adc	$a1, $a1
@@ -2673,6 +2696,7 @@
 	mov	$a3, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
 ___
 									}
@@ -3114,7 +3138,9 @@
 	movq	%xmm1, $a_ptr			# restore $a_ptr
 	movq	%xmm0, $r_ptr			# restore $r_ptr
 	add	\$`32*(18-5)`, %rsp		# difference in frame sizes
+.cfi_adjust_cfa_offset	`-32*(18-5)`
 	jmp	.Lpoint_double_shortcut$x
+.cfi_adjust_cfa_offset	`32*(18-5)`
 
 .align	32
 .Ladd_proceed$x:
@@ -3651,6 +3677,7 @@
 .type	__ecp_nistz256_add_tox,\@abi-omnipotent
 .align	32
 __ecp_nistz256_add_tox:
+.cfi_startproc
 	xor	$t4, $t4
 	adc	8*0($b_ptr), $a0
 	adc	8*1($b_ptr), $a1
@@ -3679,11 +3706,13 @@
 	mov	$a3, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
 
 .type	__ecp_nistz256_sub_fromx,\@abi-omnipotent
 .align	32
 __ecp_nistz256_sub_fromx:
+.cfi_startproc
 	xor	$t4, $t4
 	sbb	8*0($b_ptr), $a0
 	sbb	8*1($b_ptr), $a1
@@ -3712,11 +3741,13 @@
 	mov	$a3, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
 
 .type	__ecp_nistz256_subx,\@abi-omnipotent
 .align	32
 __ecp_nistz256_subx:
+.cfi_startproc
 	xor	$t4, $t4
 	sbb	$a0, $t0
 	sbb	$a1, $t1
@@ -3741,11 +3772,13 @@
 	cmovc	$t3, $a3
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_subx,.-__ecp_nistz256_subx
 
 .type	__ecp_nistz256_mul_by_2x,\@abi-omnipotent
 .align	32
 __ecp_nistz256_mul_by_2x:
+.cfi_startproc
 	xor	$t4, $t4
 	adc	$a0, $a0		# a0:a3+a0:a3
 	adc	$a1, $a1
@@ -3774,6 +3807,7 @@
 	mov	$a3, 8*3($r_ptr)
 
 	ret
+.cfi_endproc
 .size	__ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
 ___
 									}
diff --git a/src/crypto/fipsmodule/ec/asm/p256_beeu-x86_64-asm.pl b/src/crypto/fipsmodule/ec/asm/p256_beeu-x86_64-asm.pl
index 12b9f5a..0bb6547 100644
--- a/src/crypto/fipsmodule/ec/asm/p256_beeu-x86_64-asm.pl
+++ b/src/crypto/fipsmodule/ec/asm/p256_beeu-x86_64-asm.pl
@@ -156,9 +156,6 @@
 .cfi_startproc
     push %rbp
 .cfi_push rbp
-    movq %rsp, %rbp
-.cfi_def_cfa_register rbp
-
     push %r12
 .cfi_push r12
     push %r13
@@ -173,6 +170,7 @@
 .cfi_push rsi
 
     sub \$$last_rsp_offset, %rsp
+.cfi_adjust_cfa_offset  $last_rsp_offset
     movq $out, $out_rsp(%rsp)
 
     # X=1, Y=0
@@ -380,6 +378,7 @@
 
 .Lbeeu_finish:
     add \$$last_rsp_offset, %rsp
+.cfi_adjust_cfa_offset  -$last_rsp_offset
     pop %rsi
 .cfi_pop rsi
     pop %rbx
@@ -394,9 +393,8 @@
 .cfi_pop r12
     pop %rbp
 .cfi_pop rbp
-.cfi_def_cfa rsp, 8
-.cfi_endproc
     ret
+.cfi_endproc
 
 .size beeu_mod_inverse_vartime, .-beeu_mod_inverse_vartime
 ___