Merge tag 'android-5.1.0_r1' into HEAD

Android 5.1.0 release 1

Conflicts:
	libc/arch-arm/arm.mk
	libc/arch-arm/krait/bionic/memcpy_base.S

Change-Id: Ia79ac57b847a5bd55818d8a7e95cc2bb39a9f570
diff --git a/libc/arch-arm/arm.mk b/libc/arch-arm/arm.mk
old mode 100644
new mode 100755
index cca4ed0..de4f6eb
--- a/libc/arch-arm/arm.mk
+++ b/libc/arch-arm/arm.mk
@@ -38,9 +38,9 @@
     upstream-freebsd/lib/libc/string/wcsrchr.c \
     upstream-freebsd/lib/libc/string/wmemcmp.c \
     upstream-freebsd/lib/libc/string/wmemmove.c \
+    upstream-openbsd/lib/libc/string/stpcpy.c \
 
 libc_openbsd_src_files_arm += \
-    upstream-openbsd/lib/libc/string/bcopy.c \
     upstream-openbsd/lib/libc/string/stpncpy.c \
     upstream-openbsd/lib/libc/string/strlcat.c \
     upstream-openbsd/lib/libc/string/strlcpy.c \
@@ -72,6 +72,12 @@
 ifeq ($(strip $(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)),)
   $(warning TARGET_$(my_2nd_arch_prefix)ARCH is arm, but TARGET_$(my_2nd_arch_prefix)CPU_VARIANT is not defined)
 endif
+ifneq ($(strip $(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)),krait)
+ifneq ($(strip $(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)),cortex-a53)
+libc_bionic_src_files_arm += \
+    upstream-openbsd/lib/libc/string/bcopy.c
+endif
+endif
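+# (krait and cortex-a53 builds get bcopy from the assembly memmove added in
+# arch-arm/krait/bionic/memmove.S, so the OpenBSD C bcopy is only compiled
+# for the other CPU variants.)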
 cpu_variant_mk := $(LOCAL_PATH)/arch-arm/$(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT)/$(TARGET_$(my_2nd_arch_prefix)CPU_VARIANT).mk
 ifeq ($(wildcard $(cpu_variant_mk)),)
 $(error "TARGET_$(my_2nd_arch_prefix)CPU_VARIANT not set or set to an unknown value. Possible values are cortex-a7, cortex-a8, cortex-a9, cortex-a15, krait, denver. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
diff --git a/libc/arch-arm/cortex-a53/cortex-a53.mk b/libc/arch-arm/cortex-a53/cortex-a53.mk
new file mode 100644
index 0000000..3ed80f2
--- /dev/null
+++ b/libc/arch-arm/cortex-a53/cortex-a53.mk
@@ -0,0 +1,10 @@
+libc_bionic_src_files_arm += \
+    arch-arm/cortex-a15/bionic/memcpy.S \
+    arch-arm/cortex-a15/bionic/memset.S \
+    arch-arm/cortex-a15/bionic/strcat.S \
+    arch-arm/cortex-a15/bionic/strcmp.S \
+    arch-arm/cortex-a15/bionic/strcpy.S \
+    arch-arm/cortex-a15/bionic/strlen.S \
+    arch-arm/cortex-a15/bionic/__strcat_chk.S \
+    arch-arm/cortex-a15/bionic/__strcpy_chk.S \
+    arch-arm/krait/bionic/memmove.S
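+
+# (cortex-a53 has no bespoke routines yet: it reuses the cortex-a15
+# string/memory implementations plus the krait memmove, which also
+# provides bcopy.)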
diff --git a/libc/arch-arm/krait/bionic/memcpy_base.S b/libc/arch-arm/krait/bionic/memcpy_base.S
old mode 100644
new mode 100755
index 035dcf1..068f2f6
--- a/libc/arch-arm/krait/bionic/memcpy_base.S
+++ b/libc/arch-arm/krait/bionic/memcpy_base.S
@@ -1,123 +1,215 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
+/***************************************************************************
+ Copyright (c) 2009-2013 The Linux Foundation. All rights reserved.
 
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+     * Neither the name of The Linux Foundation nor the names of its contributors may
+       be used to endorse or promote products derived from this software
+       without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+  ***************************************************************************/
+
+/* Assumes neon instructions and a cache line size of 64 bytes. */
+
+#include <machine/cpu-features.h>
+#include <machine/asm.h>
 
 /*
- * This code assumes it is running on a processor that supports all arm v7
- * instructions, that supports neon instructions, and that has a 32 byte
- * cache line.
+ * These default settings are good for all Krait-based systems
+ * as of this writing, but they can be overridden in:
+ *   device/<vendor>/<board>/BoardConfig.mk
+ * by setting the following:
+ *   TARGET_USE_KRAIT_BIONIC_OPTIMIZATION := true
+ *   TARGET_USE_KRAIT_PLD_SET := true
+ *   TARGET_KRAIT_BIONIC_PLDOFFS := <pldoffset>
+ *   TARGET_KRAIT_BIONIC_PLDSIZE := <pldsize>
+ *   TARGET_KRAIT_BIONIC_PLDTHRESH := <pldthreshold>
+ *   TARGET_KRAIT_BIONIC_BBTHRESH := <bbthreshold>
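+ *
+ * For illustration only, a hypothetical BoardConfig.mk that mirrors the
+ * defaults defined below would contain:
+ *   TARGET_USE_KRAIT_BIONIC_OPTIMIZATION := true
+ *   TARGET_KRAIT_BIONIC_PLDOFFS := 10
+ *   TARGET_KRAIT_BIONIC_PLDSIZE := 64
+ *   TARGET_KRAIT_BIONIC_PLDTHRESH := 10
+ *   TARGET_KRAIT_BIONIC_BBTHRESH := 64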
  */
 
-// Assumes neon instructions and a cache line size of 32 bytes.
+#ifndef PLDOFFS
+#define PLDOFFS	(10)
+#endif
+#ifndef PLDTHRESH
+#define PLDTHRESH (PLDOFFS)
+#endif
+#ifndef BBTHRESH
+#define BBTHRESH (4096/64)
+#endif
+#if (PLDOFFS < 1)
+#error Routine does not support offsets less than 1
+#endif
+#if (PLDTHRESH < PLDOFFS)
+#error PLD threshold must be greater than or equal to the PLD offset
+#endif
+#ifndef PLDSIZE
+#define PLDSIZE	(64)
+#endif
+	.text
+	.fpu    neon
 
-ENTRY_PRIVATE(MEMCPY_BASE)
-        .cfi_def_cfa_offset 8
-        .cfi_rel_offset r0, 0
-        .cfi_rel_offset lr, 4
+ENTRY(MEMCPY_BASE)
+MEMCPY_BASE_ALIGNED:
+	// .cfi_startproc
+	.save {r0, r9, r10, lr}
+	// .cfi_def_cfa_offset 8
+	// .cfi_rel_offset r0, 0
+	// .cfi_rel_offset lr, 4
+	cmp	r2, #4
+	blt	.Lneon_lt4
+	cmp	r2, #16
+	blt	.Lneon_lt16
+	cmp	r2, #32
+	blt	.Lneon_16
+	cmp	r2, #64
+	blt	.Lneon_copy_32_a
 
-        /* do we have at least 16-bytes to copy (needed for alignment below) */
-        cmp         r2, #16
-        blo         5f
+	mov	r12, r2, lsr #6
+	cmp	r12, #PLDTHRESH
+	ble	.Lneon_copy_64_loop_nopld
 
-        /* align destination to cache-line for the write-buffer */
-        rsb         r3, r0, #0
-        ands        r3, r3, #0xF
-        beq         2f
+	push	{r9, r10}
+	.cfi_adjust_cfa_offset 8
+	.cfi_rel_offset r9, 0
+	.cfi_rel_offset r10, 4
 
-        /* copy up to 15-bytes (count in r3) */
-        sub         r2, r2, r3
-        movs        ip, r3, lsl #31
-        itt         mi
-        ldrbmi      lr, [r1], #1
-        strbmi      lr, [r0], #1
-        itttt       cs
-        ldrbcs      ip, [r1], #1
-        ldrbcs      lr, [r1], #1
-        strbcs      ip, [r0], #1
-        strbcs      lr, [r0], #1
-        movs        ip, r3, lsl #29
-        bge         1f
-        // copies 4 bytes, destination 32-bits aligned
-        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
-        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
-1:      bcc         2f
-        // copies 8 bytes, destination 64-bits aligned
-        vld1.8      {d0}, [r1]!
-        vst1.8      {d0}, [r0, :64]!
+	cmp	r12, #BBTHRESH
+	ble	.Lneon_prime_pump
 
-2:      /* make sure we have at least 64 bytes to copy */
-        subs        r2, r2, #64
-        blo         2f
+	add	lr, r0, #0x400
+	add	r9, r1, #(PLDOFFS*PLDSIZE)
+	sub	lr, lr, r9
+	lsl	lr, lr, #21
+	lsr	lr, lr, #21
+	add	lr, lr, #(PLDOFFS*PLDSIZE)
+	cmp	r12, lr, lsr #6
+	ble	.Lneon_prime_pump
 
-1:      /* The main loop copies 64 bytes at a time */
-        vld1.8      {d0  - d3},   [r1]!
-        vld1.8      {d4  - d7},   [r1]!
-        pld         [r1, #(32*8)]
-        subs        r2, r2, #64
-        vst1.8      {d0  - d3},   [r0, :128]!
-        vst1.8      {d4  - d7},   [r0, :128]!
-        bhs         1b
+	itt	gt
+	movgt	r9, #(PLDOFFS)
+	rsbsgt	r9, r9, lr, lsr #6
+	ble	.Lneon_prime_pump
 
-2:      /* fix-up the remaining count and make sure we have >= 32 bytes left */
-        adds        r2, r2, #32
-        blo         4f
+	add	r10, r1, lr
+	bic	r10, #0x3F
 
-        /* Copy 32 bytes. These cache lines were already preloaded */
-        vld1.8      {d0 - d3},  [r1]!
-        sub         r2, r2, #32
-        vst1.8      {d0 - d3},  [r0, :128]!
+	sub	r12, r12, lr, lsr #6
 
-4:      /* less than 32 left */
-        add         r2, r2, #32
-        tst         r2, #0x10
-        beq         5f
-        // copies 16 bytes, 128-bits aligned
-        vld1.8      {d0, d1}, [r1]!
-        vst1.8      {d0, d1}, [r0, :128]!
+	cmp	r9, r12
+	itee	le
+	suble	r12, r12, r9
+	movgt	r9, r12
+	movgt	r12, #0
 
-5:      /* copy up to 15-bytes (count in r2) */
-        movs        ip, r2, lsl #29
-        bcc         1f
-        vld1.8      {d0}, [r1]!
-        vst1.8      {d0}, [r0]!
-1:      bge         2f
-        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
-        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0]!
-2:      movs        ip, r2, lsl #31
-        itt         mi
-        ldrbmi      r3, [r1], #1
-        strbmi      r3, [r0], #1
-        itttt       cs
-        ldrbcs      ip, [r1], #1
-        ldrbcs      lr, [r1], #1
-        strbcs      ip, [r0], #1
-        strbcs      lr, [r0], #1
+	pld	[r1, #((PLDOFFS-1)*PLDSIZE)]
+.Lneon_copy_64_loop_outer_doublepld:
+	pld	[r1, #((PLDOFFS)*PLDSIZE)]
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]!
+	ldr	r3, [r10]
+	subs	r9, r9, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]!
+	add	r10, #64
+	bne	.Lneon_copy_64_loop_outer_doublepld
+	cmp	r12, #0
+	beq	.Lneon_pop_before_nopld
 
-        ldmfd       sp!, {r0, lr}
-        bx          lr
+	cmp	r12, #(512*1024/64)
+	blt	.Lneon_copy_64_loop_outer
+
+.Lneon_copy_64_loop_ddr:
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]!
+	pld	[r10]
+	subs	r12, r12, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]!
+	add	r10, #64
+	bne	.Lneon_copy_64_loop_ddr
+	b	.Lneon_pop_before_nopld
+
+.Lneon_prime_pump:
+	mov	lr, #(PLDOFFS*PLDSIZE)
+	add	r10, r1, #(PLDOFFS*PLDSIZE)
+	bic	r10, #0x3F
+	sub	r12, r12, #PLDOFFS
+	ldr	r3, [r10, #(-1*PLDSIZE)]
+.Lneon_copy_64_loop_outer:
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]!
+	ldr	r3, [r10]
+	subs	r12, r12, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]!
+	add	r10, #64
+	bne	.Lneon_copy_64_loop_outer
+.Lneon_pop_before_nopld:
+	mov	r12, lr, lsr #6
+	pop	{r9, r10}
+	.cfi_restore r9
+	.cfi_restore r10
+	.cfi_adjust_cfa_offset -8
+
+.Lneon_copy_64_loop_nopld:
+	vld1.32	{q8, q9}, [r1]!
+	vld1.32	{q10, q11}, [r1]!
+	subs	r12, r12, #1
+	vst1.32	{q8, q9}, [r0]!
+	vst1.32	{q10, q11}, [r0]!
+	bne	.Lneon_copy_64_loop_nopld
+	ands	r2, r2, #0x3f
+	.cfi_restore r0
+	.cfi_adjust_cfa_offset -4
+	beq	.Lneon_exit
+.Lneon_copy_32_a:
+	movs	r3, r2, lsl #27
+	bcc	.Lneon_16
+	vld1.32	{q0,q1}, [r1]!
+	vst1.32	{q0,q1}, [r0]!
+.Lneon_16:
+	bpl	.Lneon_lt16
+	vld1.32	{q8}, [r1]!
+	vst1.32	{q8}, [r0]!
+	ands	r2, r2, #0x0f
+	beq	.Lneon_exit
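+	// Tail copies below use a flag trick: "movs rX, r2, lsl #N" puts one
+	// byte-count bit in C and the next in N, so the ldrcs/strcs pair handles
+	// the larger chunk and ldrmi/strmi the smaller (lsl #29: C = bit 3 ->
+	// 8 bytes, N = bit 2 -> 4 bytes; lsl #31: C = bit 1 -> 2, N = bit 0 -> 1).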
+.Lneon_lt16:
+	movs	r3, r2, lsl #29
+	itttt	cs
+	ldrcs	r3, [r1], #4
+	strcs	r3, [r0], #4
+	ldrcs	r3, [r1], #4
+	strcs	r3, [r0], #4
+	itt	mi
+	ldrmi	r3, [r1], #4
+	strmi	r3, [r0], #4
+.Lneon_lt4:
+	movs	r2, r2, lsl #31
+	itt	cs
+	ldrhcs	r3, [r1], #2
+	strhcs	r3, [r0], #2
+	itt	mi
+	ldrbmi	r3, [r1]
+	strbmi	r3, [r0]
+.Lneon_exit:
+	pop	{r0, lr}
+	bx	lr
+        //.cfi_endproc
 END(MEMCPY_BASE)
+
diff --git a/libc/arch-arm/krait/bionic/memmove.S b/libc/arch-arm/krait/bionic/memmove.S
new file mode 100644
index 0000000..698cc52
--- /dev/null
+++ b/libc/arch-arm/krait/bionic/memmove.S
@@ -0,0 +1,219 @@
+/***************************************************************************
+ Copyright (c) 2009-2014 The Linux Foundation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+     * Neither the name of The Linux Foundation nor the names of its contributors may
+       be used to endorse or promote products derived from this software
+       without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+  ***************************************************************************/
+
+/***************************************************************************
+ *  Neon memmove: attempts to do the memmove with Neon registers if possible.
+ *     Inputs:
+ *        dest: the destination buffer
+ *        src: the source buffer
+ *        n: the number of bytes to transfer
+ *     Outputs:
+ *        returns dest (in r0)
+ ***************************************************************************/
+
+#include <private/bionic_asm.h>
+#include <private/libc_events.h>
+/*
+ * These can be overridden in:
+ *   device/<vendor>/<board>/BoardConfig.mk
+ * by setting the following:
+ *   TARGET_USE_KRAIT_BIONIC_OPTIMIZATION := true
+ *   TARGET_USE_KRAIT_PLD_SET := true
+ *   TARGET_KRAIT_BIONIC_PLDOFFS := <pldoffset>
+ *   TARGET_KRAIT_BIONIC_PLDSIZE := <pldsize>
+ *   TARGET_KRAIT_BIONIC_PLDTHRESH := <pldthreshold>
+ */
+#ifndef PLDOFFS
+#define PLDOFFS	(10)
+#endif
+#ifndef PLDTHRESH
+#define PLDTHRESH (PLDOFFS)
+#endif
+#if (PLDOFFS < 5)
+#error Routine does not support offsets less than 5
+#endif
+#if (PLDTHRESH < PLDOFFS)
+#error PLD threshold must be greater than or equal to the PLD offset
+#endif
+#ifndef PLDSIZE
+#define PLDSIZE (64)
+#endif
+
+	.text
+	.syntax	unified
+	.fpu neon
+	.thumb
+	.thumb_func
+
+ENTRY(bcopy)
+        //.cfi_startproc
+	mov	r12, r0
+	mov	r0, r1
+	mov	r1, r12
+        // Fall through to memmove
+        //.cfi_endproc
+END(bcopy)
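+
+// In C terms (BSD bcopy takes its arguments in the reverse order of memmove):
+//   void bcopy(const void* src, void* dst, size_t n) { memmove(dst, src, n); }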
+
+ENTRY(memmove)
+_memmove_words:
+        //.cfi_startproc
+	.save	{r0, lr}
+	cmp	r2, #0
+	it	ne
+	subsne	r12, r0, r1	// Warning: do not combine these "it" blocks
+	it	eq
+	bxeq	lr
+//	memmove only if r1 < r0 < r1+r2
+	cmp	r0, r1
+	itt	ge
+	addge	r12, r1, r2
+	cmpge	r12, r0
+	it	le
+	ble	memcpy
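+	// Overlap dispatch above, as an illustrative C sketch:
+	//   if (dst < src || src + n <= dst)   // forward copy is safe
+	//       return memcpy(dst, src, n);
+	//   // else fall through and copy backward, from the buffer ends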
+	cmp	r2, #4
+	it	le
+	ble	.Lneon_b2f_smallcopy_loop
+	push	{r0, lr}
+	add	r0, r0, r2
+	add	r1, r1, r2
+	cmp	r2, #64
+	it	ge
+	bge	.Lneon_b2f_copy_64
+	cmp	r2, #32
+	it	ge
+	bge	.Lneon_b2f_copy_32
+	cmp	r2, #8
+	it	ge
+	bge	.Lneon_b2f_copy_8
+	b	.Lneon_b2f_copy_1
+.Lneon_b2f_copy_64:
+	mov	r12, r2, lsr #6
+	add	r0, r0, #32
+	add	r1, r1, #32
+	cmp	r12, #PLDTHRESH
+	it	le
+	ble	.Lneon_b2f_copy_64_loop_nopld
+	sub	r12, #PLDOFFS
+	sub	lr, r1, #(PLDOFFS)*PLDSIZE
+.Lneon_b2f_copy_64_loop_outer:
+	pld	[lr]
+	sub	r1, r1, #96
+	sub	r0, r0, #96
+	vld1.32	{q0, q1}, [r1]!
+	vld1.32	{q2, q3}, [r1]
+	sub	lr, lr, #64
+	subs	r12, r12, #1
+	vst1.32	{q0, q1}, [r0]!
+	vst1.32	{q2, q3}, [r0]
+	it	ne
+	bne	.Lneon_b2f_copy_64_loop_outer
+	mov	r12, #PLDOFFS
+.Lneon_b2f_copy_64_loop_nopld:
+	sub	r1, r1, #96
+	sub	r0, r0, #96
+	vld1.32	{q8, q9}, [r1]!
+	vld1.32	{q10, q11}, [r1]
+	subs	r12, r12, #1
+	vst1.32	{q8, q9}, [r0]!
+	vst1.32	{q10, q11}, [r0]
+	it	ne
+	bne	.Lneon_b2f_copy_64_loop_nopld
+	ands	r2, r2, #0x3f
+	it	eq
+	beq	.Lneon_memmove_done
+	sub	r1, r1, #32
+	sub	r0, r0, #32
+	cmp	r2, #32
+	it	lt
+	blt	.Lneon_b2f_copy_8
+.Lneon_b2f_copy_32:
+	sub	r1, r1, #32
+	sub	r0, r0, #32
+	vld1.32	{q0, q1}, [r1]
+	vst1.32	{q0, q1}, [r0]
+	ands	r2, r2, #0x1f
+	it	eq
+	beq	.Lneon_memmove_done
+.Lneon_b2f_copy_8:
+	movs	r12, r2, lsr #0x3
+	it	eq
+	beq	.Lneon_b2f_copy_1
+.Lneon_b2f_copy_8_loop:
+	sub	r1, r1, #8
+	sub	r0, r0, #8
+	vld1.32	{d0}, [r1]
+	subs	r12, r12, #1
+	vst1.32	{d0}, [r0]
+	it	ne
+	bne	.Lneon_b2f_copy_8_loop
+	ands	r2, r2, #0x7
+	beq	.Lneon_memmove_done
+.Lneon_b2f_copy_1:
+	movs	r12, r2, lsl #29
+	itttt	mi
+	submi	r1, r1, #4
+	submi	r0, r0, #4
+	ldrmi	r3, [r1]
+	strmi	r3, [r0]
+	movs	r2, r2, lsl #31
+	itttt	cs
+	subcs	r1, r1, #2
+	subcs	r0, r0, #2
+	ldrhcs	r3, [r1]
+	strhcs	r3, [r0]
+	itttt	mi
+	submi	r1, r1, #1
+	submi	r0, r0, #1
+	ldrbmi	r12, [r1]
+	strbmi	r12, [r0]
+.Lneon_memmove_done:
+	pop	{r0, pc}
+.Lneon_b2f_smallcopy_loop:
+	// 4 bytes or less
+	add	r1, r1, r2
+	add	r0, r0, r2
+	movs	r12, r2, lsl #29
+	itttt	mi
+	submi	r1, r1, #4
+	submi	r0, r0, #4
+	ldrmi	r3, [r1]
+	strmi	r3, [r0]
+	movs	r2, r2, lsl #31
+	itttt	cs
+	subcs	r1, r1, #2
+	subcs	r0, r0, #2
+	ldrhcs	r3, [r1]
+	strhcs	r3, [r0]
+	itttt	mi
+	submi	r1, r1, #1
+	submi	r0, r0, #1
+	ldrbmi	r12, [r1]
+	strbmi	r12, [r0]
+	bx	lr
+//	.cfi_endproc
+END(memmove)
+
diff --git a/libc/arch-arm/krait/krait.mk b/libc/arch-arm/krait/krait.mk
index 1bb7b0a..08377b4 100644
--- a/libc/arch-arm/krait/krait.mk
+++ b/libc/arch-arm/krait/krait.mk
@@ -1,9 +1,18 @@
 libc_bionic_src_files_arm += \
-    arch-arm/krait/bionic/memcpy.S \
     arch-arm/krait/bionic/memset.S \
     arch-arm/krait/bionic/strcmp.S \
     arch-arm/krait/bionic/__strcat_chk.S \
     arch-arm/krait/bionic/__strcpy_chk.S \
+    arch-arm/krait/bionic/memmove.S
+
+# Some targets do not need this memcpy optimization
+ifeq ($(TARGET_CPU_MEMCPY_BASE_OPT_DISABLE),true)
+libc_bionic_src_files_arm += \
+    arch-arm/cortex-a15/bionic/memcpy.S
+else
+libc_bionic_src_files_arm += \
+    arch-arm/krait/bionic/memcpy.S
+endif
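+
+# Example (hypothetical BoardConfig.mk) for such a target:
+#   TARGET_CPU_MEMCPY_BASE_OPT_DISABLE := true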
 
-# Use cortex-a15 versions of strcat/strcpy/strlen and standard memmove
+# Use cortex-a15 versions of strcat/strcpy/strlen
 libc_bionic_src_files_arm += \
@@ -11,4 +20,3 @@
     arch-arm/cortex-a15/bionic/strcat.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
     arch-arm/cortex-a15/bionic/strlen.S \
-    bionic/memmove.c \
diff --git a/libc/bionic/malloc_debug_check.cpp b/libc/bionic/malloc_debug_check.cpp
index dee03fa..da48059 100644
--- a/libc/bionic/malloc_debug_check.cpp
+++ b/libc/bionic/malloc_debug_check.cpp
@@ -45,6 +45,7 @@
 #include <time.h>
 #include <unistd.h>
 #include <unwind.h>
+#include <signal.h>
 
 #include "debug_mapinfo.h"
 #include "debug_stacktrace.h"
@@ -55,6 +56,14 @@
 #include "private/libc_logging.h"
 #include "private/ScopedPthreadMutexLocker.h"
 
+extern unsigned int malloc_sig_enabled;
+extern unsigned int min_allocation_report_limit;
+extern unsigned int max_allocation_limit;
+extern char* process_name;
+static size_t total_count = 0;
+static bool isDumped = false;
+static bool sigHandled = false;
+
 #define MAX_BACKTRACE_DEPTH 16
 #define ALLOCATION_TAG      0x1ee7d00d
 #define BACKLOG_TAG         0xbabecafe
@@ -63,6 +72,10 @@
 #define FRONT_GUARD_LEN     (1<<5)
 #define REAR_GUARD          0xbb
 #define REAR_GUARD_LEN      (1<<5)
+#define FRONT_GUARD_SS      0xab
+
+static void malloc_sigaction(int signum, siginfo_t * sg, void * cxt);
+static struct sigaction default_sa;
 
 static void log_message(const char* format, ...) {
   va_list args;
@@ -135,9 +148,14 @@
     memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN);
 }
 
+static inline void set_snapshot(hdr_t* hdr) {
+    memset(hdr->front_guard, FRONT_GUARD_SS, FRONT_GUARD_LEN);
+}
+
 static inline bool is_front_guard_valid(hdr_t* hdr) {
     for (size_t i = 0; i < FRONT_GUARD_LEN; i++) {
-        if (hdr->front_guard[i] != FRONT_GUARD) {
+        if (!((hdr->front_guard[i] == FRONT_GUARD) ||
+                    (hdr->front_guard[i] == FRONT_GUARD_SS))) {
             return false;
         }
     }
@@ -171,6 +189,9 @@
 }
 
 static inline void add_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
+    if (hdr->tag == ALLOCATION_TAG) {
+        total_count += hdr->size;
+    }
     hdr->prev = NULL;
     hdr->next = *head;
     if (*head)
@@ -181,6 +202,9 @@
 }
 
 static inline int del_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
+    if (hdr->tag == ALLOCATION_TAG) {
+        total_count -= hdr->size;
+    }
     if (hdr->prev) {
         hdr->prev->next = hdr->next;
     } else {
@@ -194,6 +218,25 @@
     return 0;
 }
 
+static void snapshot_report_leaked_nodes() {
+    log_message("%s: %s\n", __FILE__, __FUNCTION__);
+    hdr_t * iterator = head;
+    size_t total_size = 0;
+    do {
+        if (iterator->front_guard[0] == FRONT_GUARD &&
+                iterator->size >= min_allocation_report_limit) {
+            log_message("obj %p, size %d", iterator, iterator->size);
+            total_size += iterator->size;
+            log_backtrace(iterator->bt, iterator->bt_depth);
+            log_message("------------------------------"); // as an end marker
+            // Marking the node as we do not want to print it again.
+            set_snapshot(iterator);
+        }
+        iterator = iterator->next;
+    } while (iterator);
+    log_message("Total Pending allocations after last snapshot: %d", total_size);
+}
+
 static inline void add(hdr_t* hdr, size_t size) {
     ScopedPthreadMutexLocker locker(&lock);
     hdr->tag = ALLOCATION_TAG;
@@ -202,6 +245,21 @@
     init_rear_guard(hdr);
     ++g_allocated_block_count;
     add_locked(hdr, &tail, &head);
+    if (total_count >= max_allocation_limit && !isDumped && malloc_sig_enabled) {
+        isDumped = true;
+        log_message("Maximum limit of the %s process (%d Bytes) size has reached."\
+                "Maximum limit is set to:%d Bytes\n", process_name,
+                total_count, max_allocation_limit);
+        log_message("Start dumping allocations of the process %s", process_name);
+        log_message("+++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++ ***\n");
+
+        // Print allocations of the process
+        snapshot_report_leaked_nodes();
+
+        log_message("*** +++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++\n");
+        log_message("Completed dumping allocations of the process %s", process_name);
+
+    }
 }
 
 static inline int del(hdr_t* hdr) {
@@ -233,7 +291,8 @@
 static inline int check_guards(hdr_t* hdr, int* safe) {
     *safe = 1;
     if (!is_front_guard_valid(hdr)) {
-        if (hdr->front_guard[0] == FRONT_GUARD) {
+        if ((hdr->front_guard[0] == FRONT_GUARD) ||
+                ((hdr->front_guard[0] == FRONT_GUARD_SS))) {
             log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED FRONT GUARD\n",
                        user(hdr), hdr->size);
         } else {
@@ -637,6 +696,8 @@
 
 pthread_key_t g_debug_calls_disabled;
 
+#define DEBUG_SIGNAL SIGWINCH
+
 extern "C" bool malloc_debug_initialize(HashTable* hash_table, const MallocDebug* malloc_dispatch) {
   g_hash_table = hash_table;
   g_malloc_dispatch = malloc_dispatch;
@@ -656,6 +717,26 @@
     __libc_format_log(ANDROID_LOG_INFO, "libc", "not gathering backtrace information\n");
   }
 
+  /* Register a handler for DEBUG_SIGNAL (SIGWINCH) so that allocation
+   * snapshots and dumps can be requested from a running process. */
+  if (malloc_sig_enabled) {
+    struct sigaction sa;
+    sa.sa_handler = NULL;
+    sa.sa_sigaction = malloc_sigaction;
+    sigemptyset(&sa.sa_mask);
+    sigaddset(&sa.sa_mask, DEBUG_SIGNAL);
+    sa.sa_flags = SA_SIGINFO;
+    sa.sa_restorer = NULL;
+    if (sigaction(DEBUG_SIGNAL, &sa, &default_sa) < 0) {
+      log_message("Failed to register signal handler w/ errno %s", strerror(errno));
+      malloc_sig_enabled = 0;
+    } else {
+      log_message("Registered signal handler");
+      sigHandled = false;
+    }
+  }
   if (g_backtrace_enabled) {
     backtrace_startup();
   }
@@ -668,9 +749,67 @@
   if (malloc_debug_level == 10) {
     ReportMemoryLeaks();
   }
+  if (malloc_sig_enabled) {
+    log_message("Deregister %d signal handler", DEBUG_SIGNAL);
+    sigaction(DEBUG_SIGNAL, &default_sa, NULL);
+    malloc_sig_enabled = 0;
+    sigHandled = false;
+  }
   if (g_backtrace_enabled) {
     backtrace_shutdown();
   }
 
   pthread_setspecific(g_debug_calls_disabled, NULL);
 }
+
+static void snapshot_nodes_locked() {
+  log_message("%s: %s\n", __FILE__, __FUNCTION__);
+  hdr_t * iterator = head;
+  do {
+    if (iterator->front_guard[0] == FRONT_GUARD) {
+      set_snapshot(iterator);
+    }
+    iterator = iterator->next;
+  } while (iterator);
+}
+
+static void malloc_sigaction(int signum, siginfo_t * info, void * context)
+{
+  log_message("%s: %s\n", __FILE__, __FUNCTION__);
+  log_message("%s got %d signal from PID: %d (context:%x)\n",
+          __func__, signum, info->si_pid, context);
+
+  if (signum != DEBUG_SIGNAL) {
+    log_message("RECEIVED %d instead of %d\n", signum, DEBUG_SIGNAL);
+    return;
+  }
+
+  ScopedPthreadMutexLocker locker(&lock);
+
+  log_message("Process under observation:%s", process_name);
+  log_message("Maximum process size limit:%d Bytes", max_allocation_limit);
+  log_message("Won't print allocation below %d Bytes", min_allocation_report_limit);
+  log_message("Total count: %d\n", total_count);
+
+  if (!head) {
+    log_message("No allocations?");
+    return;
+  }
+  // If sigHandled is false, this is the first time the signal is handled
+  if (!sigHandled) {
+    sigHandled = true;
+    // Mark the existing nodes, on the assumption that they are not leaks.
+    snapshot_nodes_locked();
+  } else {
+    // We need to print new allocations now
+    log_message("Start dumping allocations of the process %s", process_name);
+    log_message("+++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++ ***\n");
+
+    // Print allocations of the process
+    snapshot_report_leaked_nodes();
+
+    log_message("*** +++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++ *** +++\n");
+    log_message("Completed dumping allocations of the process %s", process_name);
+  }
+  return;
+}
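+
+// Typical workflow (illustrative; DEBUG_SIGNAL is SIGWINCH, defined above):
+//   kill -WINCH <pid>   -- first signal: snapshot current allocations
+//   ...exercise the suspected leak...
+//   kill -WINCH <pid>   -- later signals: dump allocations made since the
+//                          snapshot that exceed min_allocation_report_limit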
diff --git a/libc/bionic/malloc_debug_common.cpp b/libc/bionic/malloc_debug_common.cpp
index 0b6a142..e0d439a 100644
--- a/libc/bionic/malloc_debug_common.cpp
+++ b/libc/bionic/malloc_debug_common.cpp
@@ -295,6 +295,11 @@
 #include <stdio.h>
 #include "private/libc_logging.h"
 
+unsigned int malloc_sig_enabled = 0;
+unsigned int max_allocation_limit;
+unsigned int min_allocation_report_limit;
+const char* process_name;
+
 template<typename FunctionType>
 static void InitMallocFunction(void* malloc_impl_handler, FunctionType* func, const char* prefix, const char* suffix) {
   char symbol[128];
@@ -396,6 +401,21 @@
       }
       so_name = "libc_malloc_debug_qemu.so";
       break;
+    case 40:
+      malloc_sig_enabled = 1;
+      char debug_proc_size[PROP_VALUE_MAX];
+      if (__system_property_get("libc.debug.malloc.maxprocsize", debug_proc_size))
+        max_allocation_limit = atoi(debug_proc_size);
+      else
+        max_allocation_limit = 30 * 1024 * 1024; // In Bytes [Default is 30 MB]
+      if (__system_property_get("libc.debug.malloc.minalloclim", debug_proc_size))
+        min_allocation_report_limit = atoi(debug_proc_size);
+      else
+        min_allocation_report_limit = 10 * 1024; // In Bytes [Default is 10 KB]
+      process_name = getprogname();
+
+      so_name = "libc_malloc_debug_leak.so";
+      break;
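+      // Level 40 is selected like the other debug levels (illustrative
+      // commands; the property names are the ones read above):
+      //   adb shell setprop libc.debug.malloc 40
+      //   adb shell setprop libc.debug.malloc.maxprocsize 31457280
+      //   adb shell setprop libc.debug.malloc.minalloclim 10240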
     default:
       error_log("%s: Debug level %d is unknown\n", getprogname(), g_malloc_debug_level);
       return;
@@ -456,6 +476,9 @@
     case 20:
       InitMalloc(malloc_impl_handle, &malloc_dispatch_table, "qemu_instrumented");
       break;
+    case 40:
+      InitMalloc(malloc_impl_handle, &malloc_dispatch_table, "chk");
+      break;
     default:
       break;
   }
diff --git a/libc/include/signal.h b/libc/include/signal.h
index e23e65b..8cb0678 100644
--- a/libc/include/signal.h
+++ b/libc/include/signal.h
@@ -34,7 +34,9 @@
 #include <limits.h>		/* For LONG_BIT */
 #include <string.h>		/* For memset() */
 #include <sys/types.h>
+#if defined(__LP64__)
 #include <asm/sigcontext.h>
+#endif
 
 #if defined(__LP64__) || defined(__mips__)
 /* For 64-bit (and mips), the kernel's struct sigaction doesn't match the POSIX one,
diff --git a/libm/Android.mk b/libm/Android.mk
index 994caa0..30c2bc7 100644
--- a/libm/Android.mk
+++ b/libm/Android.mk
@@ -64,8 +64,6 @@
     upstream-freebsd/lib/msun/src/e_scalbf.c \
     upstream-freebsd/lib/msun/src/e_sinh.c \
     upstream-freebsd/lib/msun/src/e_sinhf.c \
-    upstream-freebsd/lib/msun/src/e_sqrt.c \
-    upstream-freebsd/lib/msun/src/e_sqrtf.c \
     upstream-freebsd/lib/msun/src/imprecise.c \
     upstream-freebsd/lib/msun/src/k_cos.c \
     upstream-freebsd/lib/msun/src/k_cosf.c \
@@ -96,7 +94,6 @@
     upstream-freebsd/lib/msun/src/s_conjf.c \
     upstream-freebsd/lib/msun/src/s_copysign.c \
     upstream-freebsd/lib/msun/src/s_copysignf.c \
-    upstream-freebsd/lib/msun/src/s_cos.c \
     upstream-freebsd/lib/msun/src/s_cosf.c \
     upstream-freebsd/lib/msun/src/s_cproj.c \
     upstream-freebsd/lib/msun/src/s_cprojf.c \
@@ -161,7 +158,6 @@
     upstream-freebsd/lib/msun/src/s_signgam.c \
     upstream-freebsd/lib/msun/src/s_significand.c \
     upstream-freebsd/lib/msun/src/s_significandf.c \
-    upstream-freebsd/lib/msun/src/s_sin.c \
     upstream-freebsd/lib/msun/src/s_sinf.c \
     upstream-freebsd/lib/msun/src/s_tan.c \
     upstream-freebsd/lib/msun/src/s_tanf.c \
@@ -173,7 +169,7 @@
     upstream-freebsd/lib/msun/src/w_cabs.c \
     upstream-freebsd/lib/msun/src/w_cabsf.c \
     upstream-freebsd/lib/msun/src/w_drem.c \
-    upstream-freebsd/lib/msun/src/w_dremf.c \
+    upstream-freebsd/lib/msun/src/w_dremf.c
 
 libm_common_src_files += \
     fake_long_double.c \
@@ -246,11 +242,37 @@
 # self recursions for lrint, lrintf, and lrintl.
 # BUG: 14225968
 libm_common_cflags += -fno-builtin-rint -fno-builtin-rintf -fno-builtin-rintl
-
 libm_common_includes := $(LOCAL_PATH)/upstream-freebsd/lib/msun/src/
 
 libm_ld_includes := $(LOCAL_PATH)/upstream-freebsd/lib/msun/ld128/
 
+ifeq ($(TARGET_USE_QCOM_BIONIC_OPTIMIZATION),true)
+  libm_arm_src_files += \
+    arm/e_pow.S \
+    arm/s_cos.S \
+    arm/s_sin.S \
+    arm/e_sqrtf.S \
+    arm/e_sqrt.S
+  libm_arm_cflags += -DQCOM_NEON_OPTIMIZATION -fno-if-conversion
+  libm_arm_includes += $(LOCAL_PATH)/../libc/
+
+  libm_arm64_src_files += \
+    arm64/e_pow64.S \
+    upstream-freebsd/lib/msun/src/s_cos.c \
+    upstream-freebsd/lib/msun/src/s_sin.c \
+    upstream-freebsd/lib/msun/src/e_sqrtf.c \
+    upstream-freebsd/lib/msun/src/e_sqrt.c
+
+  libm_arm64_cflags += -DQCOM_NEON_OPTIMIZATION
+  libm_arm64_includes += $(LOCAL_PATH)/../libc/
+else
+  libm_common_src_files += \
+    upstream-freebsd/lib/msun/src/s_cos.c \
+    upstream-freebsd/lib/msun/src/s_sin.c \
+    upstream-freebsd/lib/msun/src/e_sqrtf.c \
+    upstream-freebsd/lib/msun/src/e_sqrt.c
+endif
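+
+# Opting in (hypothetical BoardConfig.mk):
+#   TARGET_USE_QCOM_BIONIC_OPTIMIZATION := true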
+
 #
 # libm.a for target.
 #
@@ -264,11 +286,13 @@
 LOCAL_SYSTEM_SHARED_LIBRARIES := libc
 
 # arch-specific settings
-LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/arm
-LOCAL_SRC_FILES_arm := arm/fenv.c
+LOCAL_CFLAGS_arm := $(libm_arm_cflags)
+LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/arm $(libm_arm_includes)
+LOCAL_SRC_FILES_arm := arm/fenv.c $(libm_arm_src_files)
 
-LOCAL_C_INCLUDES_arm64 := $(libm_ld_includes)
-LOCAL_SRC_FILES_arm64 := arm64/fenv.c $(libm_ld_src_files)
+LOCAL_CFLAGS_arm64 := $(libm_arm64_cflags)
+LOCAL_C_INCLUDES_arm64 := $(libm_ld_includes) $(libm_arm64_includes)
+LOCAL_SRC_FILES_arm64 := arm64/fenv.c $(libm_ld_src_files) $(libm_arm64_src_files)
 
 LOCAL_C_INCLUDES_x86 := $(LOCAL_PATH)/i387
 LOCAL_SRC_FILES_x86 := i387/fenv.c
diff --git a/libm/arm/e_pow.S b/libm/arm/e_pow.S
new file mode 100644
index 0000000..a730fa6
--- /dev/null
+++ b/libm/arm/e_pow.S
@@ -0,0 +1,459 @@
+@ Copyright (c) 2009-2013 The Linux Foundation. All rights reserved.
+@
+@ Redistribution and use in source and binary forms, with or without
+@ modification, are permitted provided that the following conditions are met:
+@     * Redistributions of source code must retain the above copyright
+@       notice, this list of conditions and the following disclaimer.
+@     * Redistributions in binary form must reproduce the above copyright
+@       notice, this list of conditions and the following disclaimer in the
+@       documentation and/or other materials provided with the distribution.
+@     * Neither the name of The Linux Foundation nor the names of its contributors may
+@       be used to endorse or promote products derived from this software
+@       without specific prior written permission.
+@
+@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+@ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+@ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+@ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+@ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+@ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+@ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+@ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+@ POSSIBILITY OF SUCH DAMAGE.
+
+
+#include <private/bionic_asm.h>
+#include <private/libc_events.h>
+
+@ Values which exist for the program lifetime:
+#define HIGH_WORD_MASK      d31
+#define EXPONENT_MASK       d30
+#define int_1               d29
+#define double_1            d28
+@ sign and 2^int_n fixup:
+#define maxrange            r12
+#define expadjustment       d7
+#define literals            r10
+@ Values which exist within both polynomial implementations:
+#define int_n               d2
+#define int_n_low           s4
+#define int_n_high          s5
+#define double_n            d3
+#define k1                  d27
+#define k2                  d26
+#define k3                  d25
+#define k4                  d24
+@ Values which cross the boundaries between polynomial implementations:
+#define ss                  d16
+#define ss2                 d17
+#define ss4                 d18
+#define Result              d0
+#define Return_hw           r1
+#define Return_lw           r0
+#define ylg2x               d0
+@ Intermediate values only needed sometimes:
+@ initial (sorted in approximate order of availability for overwriting):
+#define x_hw                r1
+#define x_lw                r0
+#define y_hw                r3
+#define y_lw                r2
+#define x                   d0
+#define bp                  d4
+#define y                   d1
+@ log series:
+#define u                   d19
+#define v                   d20
+#define lg2coeff            d21
+#define bpa                 d5
+#define bpb                 d3
+#define lg2const            d6
+#define xmantissa           r8
+#define twoto1o5            r4
+#define twoto3o5            r5
+#define ix                  r6
+#define iEXP_MASK           r7
+@ exp input setup:
+#define twoto1o8mask        d3
+#define twoto1o4mask        d4
+#define twoto1o2mask        d1
+#define ylg2x_round_offset  d16
+#define ylg2x_temp          d17
+#define yn_temp             d18
+#define yn_round_offset     d19
+#define ln2                 d5
+@ Careful, overwriting HIGH_WORD_MASK, reset it if you need it again ...
+#define rounded_exponent    d31
+@ exp series:
+#define k5                  d23
+#define k6                  d22
+#define k7                  d21
+#define k8                  d20
+#define ss3                 d19
+@ overwrite double_1 (we're done with it by now)
+#define k0                  d28
+#define twoto1o4            d6
+
+@ Instructions that gas doesn't like to encode correctly:
+#define vmov_f64            fconstd
+#define vmov_f32            fconsts
+#define vmovne_f64          fconstdne
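+@ (These take the raw 8-bit VFP immediate encoding: in this file #0x70
+@ encodes 1.0, #0x60 encodes 0.5, and #8 encodes 3.0, per the comments at
+@ each use site.)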
+
+#define KRAIT_NO_AAPCS_VFP_MODE
+
+ENTRY(pow)
+#if defined(KRAIT_NO_AAPCS_VFP_MODE)
+    @ ARM ABI has inputs coming in via r registers, let's move them to a d register
+    vmov            x, x_lw, x_hw
+#endif
+    push            {r4, r5, r6, r7, r8, r9, r10, lr}
+
+    movw            maxrange, #0x0000
+    movt            maxrange, #0x4010
+
+    @ pre-staged bp values
+    vldr            bpa, .LbpA
+    vldr            bpb, .LbpB
+    @ load two fifths into constant term in case we need it due to offsets
+    vldr            lg2const, .Ltwofifths
+
+    @ bp is initially 1.0, may adjust later based on x value
+    vmov_f64        bp,  #0x70
+
+    @ extract the mantissa from x for scaled value comparisons
+    lsl             xmantissa, x_hw, #12
+
+    @ twoto1o5 = 2^(1/5) (input bracketing)
+    movw            twoto1o5, #0x186c
+    movt            twoto1o5, #0x2611
+    @ twoto3o5 = 2^(3/5) (input bracketing)
+    movw            twoto3o5, #0x003b
+    movt            twoto3o5, #0x8406
+
+    @ finish extracting xmantissa
+    orr             xmantissa, xmantissa, x_lw, lsr #20
+
+    @ begin preparing a mask for normalization
+    vmov.i64        HIGH_WORD_MASK, #0xffffffff00000000
+
+    @ double_1 = (double) 1.0
+    vmov_f64        double_1, #0x70
+
+#if defined(KRAIT_NO_AAPCS_VFP_MODE)
+     @ move y from r registers to a d register
+    vmov            y, y_lw, y_hw
+#endif
+
+    cmp             xmantissa, twoto1o5
+
+    vshl.i64        EXPONENT_MASK, HIGH_WORD_MASK, #20
+    vshr.u64        int_1, HIGH_WORD_MASK, #63
+
+    adr             literals, .LliteralTable
+
+    bhi             .Lxgt2to1over5
+    @ zero out lg2 constant term if don't offset our input
+    vsub.f64        lg2const, lg2const, lg2const
+    b               .Lxle2to1over5
+
+.Lxgt2to1over5:
+    @ if normalized x > 2^(1/5), bp = 1 + (2^(2/5)-1) = 2^(2/5)
+    vadd.f64        bp, bp, bpa
+
+.Lxle2to1over5:
+    @ will need ln2 for various things
+    vldr            ln2, .Lln2
+
+    cmp             xmantissa, twoto3o5
+@@@@ X Value Normalization @@@@
+
+    @ ss = abs(x) 2^(-1024)
+    vbic.i64        ss, x, EXPONENT_MASK
+
+    @ N = (floor(log2(x)) + 0x3ff) * 2^52
+    vand.i64        int_n, x, EXPONENT_MASK
+
+    bls             .Lxle2to3over5
+    @ if normalized x > 2^(3/5), bp = 2^(2/5) + (2^(4/5) - 2^(2/5)) = 2^(4/5)
+    vadd.f64      bp, bp, bpb
+    vadd.f64      lg2const, lg2const, lg2const
+
+.Lxle2to3over5:
+
+    cmp             x_hw, maxrange
+    cmpls           y_hw, maxrange
+    movt            maxrange, #0x3f00
+    cmpls           maxrange, x_hw
+
+    @ load log2 polynomial series constants
+    vldm            literals!, {k4, k3, k2, k1}
+
+    @ s = abs(x) 2^(-floor(log2(x))) (normalize abs(x) to around 1)
+    vorr.i64        ss, ss, double_1
+
+@@@@ 3/2 (Log(bp(1+s)/(1-s))) input computation (s = (x-bp)/(x+bp)) @@@@
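+
+@ Background sketch: with m = |x| * 2^-n normalized near 1 and
+@ s = (m - bp)/(m + bp), ln(m/bp) = ln((1+s)/(1-s)) = 2(s + s^3/3 + s^5/5 + ...),
+@ so y log2(x) = y (n + log2(bp) + (2/ln 2)(s + s^3/3 + ...)); the L1..L4
+@ constants below are least-squares fits of the series coefficients.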
+
+    vsub.f64        u, ss, bp
+    vadd.f64        v, ss, bp
+
+    bhi             .LuseFullImpl
+
+    @ s = (x-1)/(x+1)
+    vdiv.f64        ss, u, v
+
+    @ load 2/(3log2) into lg2coeff
+    vldr            lg2coeff, .Ltwooverthreeln2
+
+    @ N = floor(log2(x)) * 2^52
+    vsub.i64        int_n, int_n, double_1
+
+@@@@ 3/2 (Log(bp(1+s)/(1-s))) polynomial series @@@@
+
+    @ ss2 = ((x-bp)/(x+bp))^2
+    vmul.f64        ss2, ss, ss
+    @ ylg2x = 3.0
+    vmov_f64        ylg2x, #8
+    vmul.f64        ss4, ss2, ss2
+
+    @ todo: useful later for two-way clamp
+    vmul.f64        lg2coeff, lg2coeff, y
+
+    @ N = floor(log2(x))
+    vshr.s64        int_n, int_n, #52
+
+    @ k3 = ss^2 * L4 + L3
+    vmla.f64        k3, ss2, k4
+
+    @ k1 = ss^2 * L2 + L1
+    vmla.f64        k1, ss2, k2
+
+    @ scale ss by 2/(3 ln 2)
+    vmul.f64        lg2coeff, ss, lg2coeff
+
+    @ ylg2x = 3.0 + s^2
+    vadd.f64        ylg2x, ylg2x, ss2
+
+    vcvt.f64.s32    double_n, int_n_low
+
+    @ k1 = s^4 (s^2 L4 + L3) + s^2 L2 + L1
+    vmla.f64        k1, ss4, k3
+
+    @ add in constant term
+    vadd.f64        double_n, lg2const
+
+    @ ylg2x = 3.0 + s^2 + s^4 (s^4 (s^2 L4 + L3) + s^2 L2 + L1)
+    vmla.f64        ylg2x, ss4, k1
+
+    @ ylg2x = y 2 s / (3 ln(2)) (3.0 + s^2 + s^4 (s^4(s^2 L4 + L3) + s^2 L2 + L1)
+    vmul.f64        ylg2x, lg2coeff, ylg2x
+
+@@@@ Compute input to Exp(s): s = y(n + log2(x)) - (floor(8 y n + 1)/8 + floor(8 y log2(x) + 1)/8) @@@@
+
+    @ mask to extract bit 1 (2^-2 from our fixed-point representation)
+    vshl.u64        twoto1o4mask, int_1, #1
+
+    @ double_n = y * n
+    vmul.f64        double_n, double_n, y
+
+    @ Load 2^(1/4) for later computations
+    vldr            twoto1o4, .Ltwoto1o4
+
+    @ either add or subtract one based on the sign of double_n and ylg2x
+    vshr.s64        ylg2x_round_offset, ylg2x, #62
+    vshr.s64        yn_round_offset, double_n, #62
+
+    @ move unmodified y*lg2x into temp space
+    vmov            ylg2x_temp, ylg2x
+    @ compute floor(8 y * n + 1)/8
+    @ and floor(8 y (log2(x)) + 1)/8
+    vcvt.s32.f64    ylg2x, ylg2x, #3
+    @ move unmodified y*n into temp space
+    vmov            yn_temp, double_n
+    vcvt.s32.f64    double_n, double_n, #3
+
+    @ load exp polynomial series constants
+    vldm            literals!, {k8, k7, k6, k5, k4, k3, k2, k1}
+
+    @ mask to extract bit 2 (2^-1 from our fixed-point representation)
+    vshl.u64        twoto1o2mask, int_1, #2
+
+    @ make rounding offsets either 1 or -1 instead of 0 or -2
+    vorr.u64        ylg2x_round_offset, ylg2x_round_offset, int_1
+    vorr.u64        yn_round_offset, yn_round_offset, int_1
+
+    @ round up to the nearest 1/8th
+    vadd.s32        ylg2x, ylg2x, ylg2x_round_offset
+    vadd.s32        double_n, double_n, yn_round_offset
+
+    @ clear out round-up bit for y log2(x)
+    vbic.s32        ylg2x, ylg2x, int_1
+    @ clear out round-up bit for yn
+    vbic.s32        double_n, double_n, int_1
+    @ add together the (fixed precision) rounded parts
+    vadd.s64        rounded_exponent, double_n, ylg2x
+    @ turn int_n into a double with value 2^int_n
+    vshl.i64        int_n, rounded_exponent, #49
+    @ compute masks for 2^(1/4) and 2^(1/2) fixups for fractional part of fixed-precision rounded values:
+    vand.u64        twoto1o4mask, twoto1o4mask, rounded_exponent
+    vand.u64        twoto1o2mask, twoto1o2mask, rounded_exponent
+
+    @ convert back into floating point, double_n now holds (double) floor(8 y * n + 1)/8
+    @                                   ylg2x now holds (double) floor(8 y * log2(x) + 1)/8
+    vcvt.f64.s32    ylg2x, ylg2x, #3
+    vcvt.f64.s32    double_n, double_n, #3
+
+    @ put the 2 bit (0.5) through the roof of twoto1o2mask (make it 0x0 or 0xffffffffffffffff)
+    vqshl.u64        twoto1o2mask, twoto1o2mask, #62
+    @ put the 1 bit (0.25) through the roof of twoto1o4mask (make it 0x0 or 0xffffffffffffffff)
+    vqshl.u64        twoto1o4mask, twoto1o4mask, #63
+
+    @ center y*log2(x) fractional part between -0.125 and 0.125 by subtracting (double) floor(8 y * log2(x) + 1)/8
+    vsub.f64        ylg2x_temp, ylg2x_temp, ylg2x
+    @ center y*n fractional part between -0.125 and 0.125 by subtracting (double) floor(8 y * n + 1)/8
+    vsub.f64        yn_temp, yn_temp, double_n
+
+    @ Add fractional parts of yn and y log2(x) together
+    vadd.f64        ss, ylg2x_temp, yn_temp
+
+    @ Result = 1.0 (offset for exp(s) series)
+    vmov_f64        Result, #0x70
+
+    @ multiply fractional part of y * log2(x) by ln(2)
+    vmul.f64        ss, ln2, ss
+
+@@@@ 10th order polynomial series for Exp(s) @@@@
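+
+@ Background sketch: after the 1/8-step argument reduction above, ss is small,
+@ and exp(s) = 1 + s + s^2/2! + ... + s^10/10!; k0 holds 1/2! and k1..k8 hold
+@ (least-squares tuned) 1/3! through 1/10!, per the .LE* table at the end.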
+
+    @ ss2 = (ss)^2
+    vmul.f64        ss2, ss, ss
+
+    @ twoto1o2mask = twoto1o2mask & twoto1o4
+    vand.u64        twoto1o2mask, twoto1o2mask, twoto1o4
+    @ twoto1o4mask = twoto1o4mask & twoto1o4
+    vand.u64        twoto1o4mask, twoto1o4mask, twoto1o4
+
+    @ Result = 1.0 + ss
+    vadd.f64        Result, Result, ss
+
+    @ k7 = ss k8 + k7
+    vmla.f64        k7, ss, k8
+
+    @ ss4 = (ss*ss) * (ss*ss)
+    vmul.f64        ss4, ss2, ss2
+
+    @ twoto1o2mask = twoto1o2mask | (double) 1.0 - results in either 1.0 or 2^(1/4) in twoto1o2mask
+    vorr.u64        twoto1o2mask, twoto1o2mask, double_1
+    @ twoto1o4mask = twoto1o4mask | (double) 1.0 - results in either 1.0 or 2^(1/4) in twoto1o4mask
+    vorr.u64        twoto1o4mask, twoto1o4mask, double_1
+
+    @ TODO: should set up sign here; expadjustment = 1.0
+    vmov_f64        expadjustment, #0x70
+
+    @ ss3 = (ss*ss) * ss
+    vmul.f64        ss3, ss2, ss
+
+    @ k0 = 1/2 (first non-unity coefficient)
+    vmov_f64        k0, #0x60
+
+    @ Mask out non-exponent bits to make sure we have just 2^int_n
+    vand.i64        int_n, int_n, EXPONENT_MASK
+
+    @ square twoto1o2mask to get 1.0 or 2^(1/2)
+    vmul.f64        twoto1o2mask, twoto1o2mask, twoto1o2mask
+    @ multiply twoto1o4mask into the exponent output adjustment value
+    vmul.f64        expadjustment, expadjustment, twoto1o4mask
+
+    @ k5 = ss k6 + k5
+    vmla.f64        k5, ss, k6
+
+    @ k3 = ss k4 + k3
+    vmla.f64        k3, ss, k4
+
+    @ k1 = ss k2 + k1
+    vmla.f64        k1, ss, k2
+
+    @ multiply twoto1o2mask into exponent output adjustment value
+    vmul.f64        expadjustment, expadjustment, twoto1o2mask
+
+    @ k5 = ss^2 ( ss k8 + k7 ) + ss k6 + k5
+    vmla.f64        k5, ss2, k7
+
+    @ k1 = ss^2 ( ss k4 + k3 ) + ss k2 + k1
+    vmla.f64        k1, ss2, k3
+
+    @ Result = 1.0 + ss + 1/2 ss^2
+    vmla.f64      Result, ss2, k0
+
+    @ Adjust int_n so that it's a double precision value that can be multiplied by Result
+    vadd.i64        expadjustment, int_n, expadjustment
+
+    @ k1 = ss^4 ( ss^2 ( ss k8 + k7 ) + ss k6 + k5 ) + ss^2 ( ss k4 + k3 ) + ss k2 + k1
+    vmla.f64        k1, ss4, k5
+
+    @ Result = 1.0 + ss + 1/2 ss^2 + ss^3 ( ss^4 ( ss^2 ( ss k8 + k7 ) + ss k6 + k5 ) + ss^2 ( ss k4 + k3 ) + ss k2 + k1 )
+    vmla.f64        Result, ss3, k1
+
+    @ multiply by adjustment (sign*(rounding ? sqrt(2) : 1) * 2^int_n)
+    vmul.f64        Result, expadjustment, Result
+
+.LleavePow:
+#if defined(KRAIT_NO_AAPCS_VFP_MODE)
+    @ return Result (FP)
+    vmov            Return_lw, Return_hw, Result
+#endif
+.LleavePowDirect:
+    @ leave directly returning whatever is in Return_lw and Return_hw
+    pop             {r4, r5, r6, r7, r8, r9, r10, pc}
+
+.LuseFullImpl:
+    pop             {r4, r5, r6, r7, r8, r9, r10, lr}
+    b               __full_ieee754_pow
+
+.align 6
+.LliteralTable:
+@ Least-squares tuned constants for 11th order log2((1+s)/(1-s)):
+.LL4: @ ~3/11
+    .long       0x53a79915, 0x3fd1b108
+.LL3: @ ~1/3
+    .long       0x9ca0567a, 0x3fd554fa
+.LL2: @ ~3/7
+    .long       0x1408e660, 0x3fdb6db7
+.LL1: @ ~3/5
+    .long       0x332D4313, 0x3fe33333
+
+@ Least-squares tuned constants for 10th order exp(s):
+.LE10: @ ~1/3628800
+    .long       0x25c7ba0a, 0x3e92819b
+.LE9: @ ~1/362880
+    .long       0x9499b49c, 0x3ec72294
+.LE8: @ ~1/40320
+    .long       0xabb79d95, 0x3efa019f
+.LE7: @ ~1/5040
+    .long       0x8723aeaa, 0x3f2a019f
+.LE6: @ ~1/720
+    .long       0x16c76a94, 0x3f56c16c
+.LE5: @ ~1/120
+    .long       0x11185da8, 0x3f811111
+.LE4: @ ~1/24
+    .long       0x5555551c, 0x3fa55555
+.LE3: @ ~1/6
+    .long       0x555554db, 0x3fc55555
+
+.LbpA: @ (2^(2/5) - 1)
+    .long       0x4ee54db1, 0x3fd472d1
+
+.LbpB: @ (2^(4/5) - 2^(2/5))
+    .long       0x1c8a36cf, 0x3fdafb62
+
+.Ltwofifths: @ 2/5 = 0.4
+    .long       0x9999999a, 0x3fd99999
+
+.Ltwooverthreeln2:
+    .long       0xDC3A03FD, 0x3FEEC709
+
+.Lln2: @ ln(2)
+    .long       0xFEFA39EF, 0x3FE62E42
+
+.Ltwoto1o4: @ 2^(1/4)
+    .long       0x0a31b715, 0x3ff306fe
+END(pow)
diff --git a/libm/arm/e_sqrt.S b/libm/arm/e_sqrt.S
new file mode 100644
index 0000000..0b5b10c
--- /dev/null
+++ b/libm/arm/e_sqrt.S
@@ -0,0 +1,41 @@
+@ Copyright (c) 2013, The Linux Foundation. All rights reserved.
+@
+@ Redistribution and use in source and binary forms, with or without
+@ modification, are permitted provided that the following conditions are
+@ met:
+@     * Redistributions of source code must retain the above copyright
+@       notice, this list of conditions and the following disclaimer.
+@     * Redistributions in binary form must reproduce the above
+@       copyright notice, this list of conditions and the following
+@       disclaimer in the documentation and/or other materials provided
+@       with the distribution.
+@     * Neither the name of The Linux Foundation nor the names of its
+@       contributors may be used to endorse or promote products derived
+@       from this software without specific prior written permission.
+@
+@ THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+@ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+@ ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+@ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+@ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+@ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+@ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+@ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+@ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <private/bionic_asm.h>
+#include <private/libc_events.h>
+
+ENTRY(sqrt)
+    vmov.f64    d0, r0, r1
+    vsqrt.f64   d0, d0
+    vmov.f64    r0, r1, d0
+    bx          lr
+END(sqrt)
+
+#if (__LDBL_MANT_DIG__ == 53)
+.weak sqrtl
+.equ  sqrtl, sqrt
+#endif
diff --git a/libm/arm/e_sqrtf.S b/libm/arm/e_sqrtf.S
new file mode 100644
index 0000000..b14a77b
--- /dev/null
+++ b/libm/arm/e_sqrtf.S
@@ -0,0 +1,36 @@
+@ Copyright (c) 2013, The Linux Foundation. All rights reserved.
+@
+@ Redistribution and use in source and binary forms, with or without
+@ modification, are permitted provided that the following conditions are
+@ met:
+@     * Redistributions of source code must retain the above copyright
+@       notice, this list of conditions and the following disclaimer.
+@     * Redistributions in binary form must reproduce the above
+@       copyright notice, this list of conditions and the following
+@       disclaimer in the documentation and/or other materials provided
+@       with the distribution.
+@     * Neither the name of The Linux Foundation nor the names of its
+@       contributors may be used to endorse or promote products derived
+@       from this software without specific prior written permission.
+@
+@ THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+@ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+@ ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+@ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+@ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+@ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+@ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+@ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+@ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <private/bionic_asm.h>
+#include <private/libc_events.h>
+
+ENTRY(sqrtf)
+    vmov.f32    s0, r0
+    vsqrt.f32   s0, s0
+    vmov.f32    r0, s0
+    bx          lr
+END(sqrtf)
diff --git a/libm/arm/s_cos.S b/libm/arm/s_cos.S
new file mode 100644
index 0000000..5c8183b
--- /dev/null
+++ b/libm/arm/s_cos.S
@@ -0,0 +1,427 @@
+@ Copyright (c) 2012, The Linux Foundation. All rights reserved.
+@
+@ Redistribution and use in source and binary forms, with or without
+@ modification, are permitted provided that the following conditions are
+@ met:
+@    * Redistributions of source code must retain the above copyright
+@      notice, this list of conditions and the following disclaimer.
+@    * Redistributions in binary form must reproduce the above
+@      copyright notice, this list of conditions and the following
+@      disclaimer in the documentation and/or other materials provided
+@      with the distribution.
+@    * Neither the name of The Linux Foundation nor the names of its
+@      contributors may be used to endorse or promote products derived
+@      from this software without specific prior written permission.
+@
+@ THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+@ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+@ ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+@ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+@ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+@ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+@ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+@ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+@ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+@
+@ Additional notices preserved for attribution purposes only.
+@
+@ ====================================================
+@ Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+@
+@ Developed at SunSoft, a Sun Microsystems, Inc. business.
+@ Permission to use, copy, modify, and distribute this
+@ software is freely granted, provided that this notice
+@ is preserved.
+@ ====================================================
+@
+@ ====================================================
+@ Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+@
+@ Developed at SunPro, a Sun Microsystems, Inc. business.
+@ Permission to use, copy, modify, and distribute this
+@ software is freely granted, provided that this notice
+@ is preserved.
+@ ====================================================
+
+#include <private/bionic_asm.h>
+#include <private/libc_events.h>
+
+#define vmov_f64 fconstd
+
+ENTRY(cos)
+    push            {r4, r6, r7, lr}
+    vmov            d0, r0, r1
+    mov             r2, r0
+    mov             r3, r1
+    movw            r1, #0x21fb
+    movt            r1, #0x3fe9
+    mov             r4, r3
+    bic             r3, r3, #0x80000000
+    sub             sp, sp, #48
+    cmp             r3, r1
+    bgt             .Lxgtpio4
+    cmp             r3, #0x3e400000
+    bge             .Lxnottiny
+    vcvt.s32.f64    s15, d0
+    vmov            r3, s15
+    cmp             r3, #0
+    beq             .Lreturnone
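+@ (0x3fe921fb is the high word of pi/4, so |x| <= pi/4 goes straight to
+@ __kernel_cos; high words below 0x3e400000 mean |x| < 2^-27, where cos(x)
+@ rounds to 1.0 because x^2/2 is below half an ulp of 1.)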
+.Lxnottiny:
+    vmov.i64        d1, #0
+    bl              __kernel_cos
+.Lleave_cos:
+    vmov            r0, r1, d0
+.Lleave_cos_direct:
+    add             sp, sp, #48
+    pop             {r4, r6, r7, pc}
+.Lxgtpio4:
+    movw            r2, #0xffff
+    movt            r2, #0x7fef
+    cmp             r3, r2
+    bgt             .LxisNaN
+    movw            r0, #0xd97b
+    movt            r0, #0x4002
+    cmp             r3, r0
+    movw            r2, #0x21fb
+    bgt             .Lxge3pio4
+    cmp             r4, #0
+    movt            r2, #0x3ff9
+    ble             .Lsmallxisnegative
+    vldr            d16, .Lpio2_1
+    cmp             r3, r2
+    vsub.f64        d16, d0, d16
+    beq             .Lxnearpio2
+    vldr            d17, .Lpio2_1t
+.Lfinalizesmallxremainder:
+    vsub.f64        d0, d16, d17
+    vsub.f64        d16, d16, d0
+    vstr            d0, [sp, #8]
+    vsub.f64        d1, d16, d17
+    vstr            d1, [sp, #16]
+.Lnmod3is1:
+    mov             r0, #1
+    bl              __kernel_sin
+    vneg.f64        d0, d0
+    b               .Lleave_cos
+.Lreturnone:
+    mov             r0, #0
+    movw            r1, #0x0000
+    movt            r1, #0x3ff0
+    vmov_f64        d0, #0x70
+    b               .Lleave_cos_direct
+.LxisNaN:
+    vsub.f64        d0, d0, d0
+    b               .Lleave_cos
+.Lxge3pio4:
+    movt            r2, #0x4139
+    cmp             r3, r2
+    bgt             .Lxgigantic
+    vmov_f64        d3, #0x60
+    vldr            d2, .Linvpio2
+    vldr            d18, .Lpio2_1
+    vabs.f64        d16, d0
+    vmla.f64        d3, d16, d2
+    vcvt.s32.f64    s3, d3
+    vcvt.f64.s32    d17, s3
+    vmov            r0, s3
+    cmp             r0, #31
+    vmls.f64        d16, d17, d18
+    vldr            d18, .Lpio2_1t
+    vmul.f64        d18, d17, d18
+    bgt             .Lcomputeremainder
+    ldr             r2, .Lnpio2_hw_ptr
+    sub             lr, r0, #1
+.LPICnpio2_hw0:
+    add             r12, pc, r2
+    ldr             r1, [r12, lr, lsl #2]
+    cmp             r3, r1
+    beq             .Lcomputeremainder
+.Lfinishthirditeration:
+    vsub.f64        d0, d16, d18
+    vstr            d0, [sp, #8]
+.Lfinishcomputingremainder:
+    vsub.f64        d16, d16, d0
+    cmp             r4, #0
+    vsub.f64        d1, d16, d18
+    vstr            d1, [sp, #16]
+    blt             .Lhandlenegativex
+.Lselectregion:
+    and             r0, r0, #3
+    cmp             r0, #1
+    beq             .Lnmod3is1
+    cmp             r0, #2
+    beq             .Lnmod3is2
+    cmp             r0, #0
+    bne             .Lnmod3is0
+    bl              __kernel_cos
+    b               .Lleave_cos
+.Lxgigantic:
+    asr             r2, r3, #20
+    vmov            r6, r7, d0
+    sub             r2, r2, #1040
+    mov             r0, r6
+    sub             r2, r2, #6
+    vldr            d16, .Ltwo24
+    sub             r1, r3, r2, lsl #20
+    vmov            d18, r0, r1
+    vcvt.s32.f64    s15, d18
+    add             r1, sp, #48
+    mov             r3, #3
+    vcvt.f64.s32    d17, s15
+    vsub.f64        d18, d18, d17
+    vstr            d17, [sp, #24]
+    vmul.f64        d18, d18, d16
+    vcvt.s32.f64    s15, d18
+    vcvt.f64.s32    d17, s15
+    vsub.f64        d18, d18, d17
+    vstr            d17, [sp, #32]
+    vmul.f64        d16, d18, d16
+    fcmpzd          d16
+    vstmdb          r1!, {d16}
+    vmrs            APSR_nzcv, fpscr
+    bne             .Lprocessnonzeroterm
+.Lskipzeroterms:
+    vldmdb          r1!, {d16}
+    sub             r3, r3, #1
+    fcmpzd          d16
+    vmrs            APSR_nzcv, fpscr
+    beq             .Lskipzeroterms
+.Lprocessnonzeroterm:
+    ldr             r12, .Ltwo_over_pi_ptr
+    add             r0, sp, #24
+    add             r1, sp, #8
+.LPICtwo_over_pi0:
+    add             lr, pc, r12
+    mov             r12, #2
+    str             lr, [sp, #4]
+    str             r12, [sp]
+    bl              __kernel_rem_pio2
+    cmp             r4, #0
+    vldr            d0, [sp, #8]
+    blt             .Lhandlenegativexalso
+    vldr            d1, [sp, #16]
+    b               .Lselectregion
+.Lxnearpio2:
+    vldr            d17, .Lpio2_2
+    vsub.f64        d16, d16, d17
+    vldr            d17, .Lpio2_2t
+    b               .Lfinalizesmallxremainder
+.Lsmallxisnegative:
+    vldr            d1, .Lpio2_1
+    cmp             r3, r2
+    vadd.f64        d16, d0, d1
+    beq             .Lxnearnegpio2
+    vldr            d17, .Lpio2_1t
+.Lfinalizesmallnegxremainder:
+    vadd.f64        d0, d16, d17
+    vsub.f64        d16, d16, d0
+    vstr            d0, [sp, #8]
+    vadd.f64        d1, d16, d17
+    vstr            d1, [sp, #16]
+.Lnmod3is0:
+    mov             r0, #1
+    bl              __kernel_sin
+    b               .Lleave_cos
+.Lnmod3is2:
+    bl              __kernel_cos
+    vneg.f64        d0, d0
+    b               .Lleave_cos
+.Lcomputeremainder:
+    vsub.f64        d0, d16, d18
+    asr             r1, r3, #20
+    vmov            r2, r3, d0
+    ubfx            r3, r3, #20, #11
+    rsb             r3, r3, r1
+    vstr            d0, [sp, #8]
+    cmp             r3, #16
+    ble             .Lfinishcomputingremainder
+    vldr            d18, .Lpio2_2
+    vmul.f64        d20, d17, d18
+    vsub.f64        d19, d16, d20
+    vsub.f64        d16, d16, d19
+    vsub.f64        d18, d16, d20
+    vldr            d16, .Lpio2_2t
+    vnmls.f64       d18, d17, d16
+    vsub.f64        d0, d19, d18
+    vmov            r2, r3, d0
+    ubfx            r3, r3, #20, #11
+    rsb             r1, r3, r1
+    vstr            d0, [sp, #8]
+    cmp             r1, #49
+    ble             .Lfinishseconditeration
+    vldr            d5, .Lpio2_3
+    vmul.f64        d20, d17, d5
+    vsub.f64        d16, d19, d20
+    vsub.f64        d4, d19, d16
+    vldr            d19, .Lpio2_3t
+    vsub.f64        d18, d4, d20
+    vnmls.f64       d18, d17, d19
+    b               .Lfinishthirditeration
+.Lhandlenegativex:
+    vneg.f64        d0, d0
+    rsb             r0, r0, #0
+    vneg.f64        d1, d1
+    vstr            d0, [sp, #8]
+    vstr            d1, [sp, #16]
+    b               .Lselectregion
+.Lfinishseconditeration:
+    vmov            d16, d19
+    b               .Lfinishcomputingremainder
+.Lxnearnegpio2:
+    vldr            d0, .Lpio2_2
+    vldr            d17, .Lpio2_2t
+    vadd.f64        d16, d16, d0
+    b               .Lfinalizesmallnegxremainder
+.Lhandlenegativexalso:
+    vldr            d6, [sp, #16]
+    vneg.f64        d0, d0
+    rsb             r0, r0, #0
+    vneg.f64        d1, d6
+    vstr            d0, [sp, #8]
+    vstr            d1, [sp, #16]
+    b               .Lselectregion
+
+.align 3
+.Lpio2_1:
+    .word           0x54400000, 0x3ff921fb
+.Lpio2_1t:
+    .word           0x1a626331, 0x3dd0b461
+.Linvpio2:
+    .word           0x6dc9c883, 0x3fe45f30
+.Ltwo24:
+    .word           0x00000000, 0x41700000
+.Lpio2_2:
+    .word           0x1a600000, 0x3dd0b461
+.Lpio2_2t:
+    .word           0x2e037073, 0x3ba3198a
+.Lpio2_3:
+    .word           0x2e000000, 0x3ba3198a
+.Lpio2_3t:
+    .word           0x252049c1, 0x397b839a
+.Lnpio2_hw_ptr:
+    .word           .Lnpio2_hw-(.LPICnpio2_hw0+8)
+.Ltwo_over_pi_ptr:
+    .word           .Ltwo_over_pi-(.LPICtwo_over_pi0+8)
+END(cos)
+
+#if (__LDBL_MANT_DIG__ == 53)
+.weak cosl
+.equ  cosl, cos
+#else
+ENTRY(cosl)
+END(cosl)
+#endif
+
+    .section        .rodata.npio2_hw,"a",%progbits
+    .align          2
+.Lnpio2_hw = . + 0
+    .type           npio2_hw, %object
+    .size           npio2_hw, 128
+npio2_hw:
+    .word           0x3ff921fb
+    .word           0x400921fb
+    .word           0x4012d97c
+    .word           0x401921fb
+    .word           0x401f6a7a
+    .word           0x4022d97c
+    .word           0x4025fdbb
+    .word           0x402921fb
+    .word           0x402c463a
+    .word           0x402f6a7a
+    .word           0x4031475c
+    .word           0x4032d97c
+    .word           0x40346b9c
+    .word           0x4035fdbb
+    .word           0x40378fdb
+    .word           0x403921fb
+    .word           0x403ab41b
+    .word           0x403c463a
+    .word           0x403dd85a
+    .word           0x403f6a7a
+    .word           0x40407e4c
+    .word           0x4041475c
+    .word           0x4042106c
+    .word           0x4042d97c
+    .word           0x4043a28c
+    .word           0x40446b9c
+    .word           0x404534ac
+    .word           0x4045fdbb
+    .word           0x4046c6cb
+    .word           0x40478fdb
+    .word           0x404858eb
+    .word           0x404921fb
+
+    .section        .rodata.two_over_pi,"a",%progbits
+    .align          2
+.Ltwo_over_pi = . + 0
+    .type           two_over_pi, %object
+    .size           two_over_pi, 264
+two_over_pi:
+    .word           0x00a2f983
+    .word           0x006e4e44
+    .word           0x001529fc
+    .word           0x002757d1
+    .word           0x00f534dd
+    .word           0x00c0db62
+    .word           0x0095993c
+    .word           0x00439041
+    .word           0x00fe5163
+    .word           0x00abdebb
+    .word           0x00c561b7
+    .word           0x00246e3a
+    .word           0x00424dd2
+    .word           0x00e00649
+    .word           0x002eea09
+    .word           0x00d1921c
+    .word           0x00fe1deb
+    .word           0x001cb129
+    .word           0x00a73ee8
+    .word           0x008235f5
+    .word           0x002ebb44
+    .word           0x0084e99c
+    .word           0x007026b4
+    .word           0x005f7e41
+    .word           0x003991d6
+    .word           0x00398353
+    .word           0x0039f49c
+    .word           0x00845f8b
+    .word           0x00bdf928
+    .word           0x003b1ff8
+    .word           0x0097ffde
+    .word           0x0005980f
+    .word           0x00ef2f11
+    .word           0x008b5a0a
+    .word           0x006d1f6d
+    .word           0x00367ecf
+    .word           0x0027cb09
+    .word           0x00b74f46
+    .word           0x003f669e
+    .word           0x005fea2d
+    .word           0x007527ba
+    .word           0x00c7ebe5
+    .word           0x00f17b3d
+    .word           0x000739f7
+    .word           0x008a5292
+    .word           0x00ea6bfb
+    .word           0x005fb11f
+    .word           0x008d5d08
+    .word           0x00560330
+    .word           0x0046fc7b
+    .word           0x006babf0
+    .word           0x00cfbc20
+    .word           0x009af436
+    .word           0x001da9e3
+    .word           0x0091615e
+    .word           0x00e61b08
+    .word           0x00659985
+    .word           0x005f14a0
+    .word           0x0068408d
+    .word           0x00ffd880
+    .word           0x004d7327
+    .word           0x00310606
+    .word           0x001556ca
+    .word           0x0073a8c9
+    .word           0x0060e27b
+    .word           0x00c08c6b
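
The quadrant dispatch at .Lselectregion above is the same one fdlibm's C s_cos.c performs: once the argument is reduced to a head/tail pair (r, w) with r close to x - n*pi/2, n mod 4 selects the kernel and the sign (the .Lnmod3is* label names notwithstanding, the selector is n mod 4). A minimal C sketch of that dispatch; cos_dispatch is an illustrative name, and the kernels are the ones declared in math_private.h later in this patch:

    extern double __kernel_sin(double, double, int);
    extern double __kernel_cos(double, double);

    /* Mirrors .Lselectregion/.Lnmod3is* in s_cos.S above.
     * r/w are the head/tail of the reduced argument x - n*pi/2. */
    double cos_dispatch(int n, double r, double w)
    {
        switch (n & 3) {
        case 0:  return  __kernel_cos(r, w);     /* fall-through path    */
        case 1:  return -__kernel_sin(r, w, 1);  /* .Lnmod3is1           */
        case 2:  return -__kernel_cos(r, w);     /* .Lnmod3is2           */
        default: return  __kernel_sin(r, w, 1);  /* .Lnmod3is0, n&3 == 3 */
        }
    }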
diff --git a/libm/arm/s_sin.S b/libm/arm/s_sin.S
new file mode 100644
index 0000000..8b9eae4
--- /dev/null
+++ b/libm/arm/s_sin.S
@@ -0,0 +1,422 @@
+@ Copyright (c) 2012, The Linux Foundation. All rights reserved.
+@
+@ Redistribution and use in source and binary forms, with or without
+@ modification, are permitted provided that the following conditions are
+@ met:
+@    * Redistributions of source code must retain the above copyright
+@      notice, this list of conditions and the following disclaimer.
+@    * Redistributions in binary form must reproduce the above
+@      copyright notice, this list of conditions and the following
+@      disclaimer in the documentation and/or other materials provided
+@      with the distribution.
+@    * Neither the name of The Linux Foundation nor the names of its
+@      contributors may be used to endorse or promote products derived
+@      from this software without specific prior written permission.
+@
+@ THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+@ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+@ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+@ ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+@ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+@ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+@ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+@ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+@ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+@ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+@
+@ Additional notices preserved for attribution purposes only.
+@
+@ ====================================================
+@ Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+@
+@ Developed at SunSoft, a Sun Microsystems, Inc. business.
+@ Permission to use, copy, modify, and distribute this
+@ software is freely granted, provided that this notice
+@ is preserved.
+@ ====================================================
+@
+@ ====================================================
+@ Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+@
+@ Developed at SunPro, a Sun Microsystems, Inc. business.
+@ Permission to use, copy, modify, and distribute this
+@ software is freely granted, provided that this notice
+@ is preserved.
+@ ====================================================
+
+#include <private/bionic_asm.h>
+#include <private/libc_events.h>
+
+#define vmov_f64 fconstd
+
+ENTRY(sin)
+    push            {r4, r6, r7, lr}
+    vmov            d0, r0, r1
+    mov             r2, r0
+    mov             r3, r1
+    movw            r1, #0x21fb
+    movt            r1, #0x3fe9
+    mov             r4, r3
+    bic             r3, r3, #0x80000000
+    sub             sp, sp, #48
+    cmp             r3, r1
+    bgt             .Lxgtpio4
+    cmp             r3, #0x3e400000
+    bge             .Lxnottiny
+    vcvt.s32.f64    s15, d0
+    vmov            r3, s15
+    cmp             r3, #0
+    bne             .Lxnottiny
+.Lleave_sin:
+    vmov            r0, r1, d0
+    add             sp, sp, #48
+    pop             {r4, r6, r7, pc}
+.Lxgtpio4:
+    movw            r2, #0xffff
+    movt            r2, #0x7fef
+    cmp             r3, r2
+    bgt             .LxisNaN
+    movw            r0, #0xd97b
+    movt            r0, #0x4002
+    cmp             r3, r0
+    movw            r2, #0x21fb
+    bgt             .Lxge3pio4
+    cmp             r4, #0
+    movt            r2, #0x3ff9
+    ble             .Lsmallxisnegative
+    vldr            d16, .Lpio2_1
+    cmp             r3, r2
+    vsub.f64        d16, d0, d16
+    beq             .Lxnearpio2
+    vldr            d17, .Lpio2_1t
+.Lfinalizesmallxremainder:
+    vsub.f64        d0, d16, d17
+    vsub.f64        d16, d16, d0
+    vstr            d0, [sp, #8]
+    vsub.f64        d1, d16, d17
+    vstr            d1, [sp, #16]
+.Lnmod3is1:
+    bl              __kernel_cos
+    b               .Lleave_sin
+.Lxnottiny:
+    vmov.i64        d1, #0
+    mov             r0, #0
+    bl              __kernel_sin
+    b               .Lleave_sin
+.LxisNaN:
+    vsub.f64        d0, d0, d0
+    b               .Lleave_sin
+.Lxge3pio4:
+    movt            r2, #0x4139
+    cmp             r3, r2
+    bgt             .Lxgigantic
+    vmov_f64        d3, #0x60
+    vldr            d2, .Linvpio2
+    vldr            d18, .Lpio2_1
+    vabs.f64        d16, d0
+    vmla.f64        d3, d16, d2
+    vcvt.s32.f64    s3, d3
+    vcvt.f64.s32    d17, s3
+    vmov            r0, s3
+    cmp             r0, #31
+    vmls.f64        d16, d17, d18
+    vldr            d18, .Lpio2_1t
+    vmul.f64        d18, d17, d18
+    bgt             .Lcomputeremainder
+    ldr             r2, .Lnpio2_hw_ptr
+    sub             lr, r0, #1
+.LPICnpio2_hw0:
+    add             r12, pc, r2
+    ldr             r1, [r12, lr, lsl #2]
+    cmp             r3, r1
+    beq             .Lcomputeremainder
+.Lfinishthirditeration:
+    vsub.f64        d0, d16, d18
+    vstr            d0, [sp, #8]
+.Lfinishcomputingremainder:
+    vsub.f64        d16, d16, d0
+    cmp             r4, #0
+    vsub.f64        d1, d16, d18
+    vstr            d1, [sp, #16]
+    blt             .Lhandlenegativex
+.Lselectregion:
+    and             r0, r0, #3
+    cmp             r0, #1
+    beq             .Lnmod3is1
+    cmp             r0, #2
+    beq             .Lnmod3is2
+    cmp             r0, #0
+    bne             .Lnmod3is0
+    mov             r0, #1
+    bl              __kernel_sin
+    b               .Lleave_sin
+.Lxgigantic:
+    asr             r2, r3, #20
+    vmov            r6, r7, d0
+    sub             r2, r2, #1040
+    mov             r0, r6
+    sub             r2, r2, #6
+    vldr            d16, .Ltwo24
+    sub             r1, r3, r2, lsl #20
+    vmov            d18, r0, r1
+    vcvt.s32.f64    s15, d18
+    add             r1, sp, #48
+    mov             r3, #3
+    vcvt.f64.s32    d17, s15
+    vsub.f64        d18, d18, d17
+    vstr            d17, [sp, #24]
+    vmul.f64        d18, d18, d16
+    vcvt.s32.f64    s15, d18
+    vcvt.f64.s32    d17, s15
+    vsub.f64        d18, d18, d17
+    vstr            d17, [sp, #32]
+    vmul.f64        d16, d18, d16
+    fcmpzd          d16
+    vstmdb          r1!, {d16}
+    vmrs            APSR_nzcv, fpscr
+    bne             .Lprocessnonzeroterm
+.Lskipzeroterms:
+    vldmdb          r1!, {d16}
+    sub             r3, r3, #1
+    fcmpzd          d16
+    vmrs            APSR_nzcv, fpscr
+    beq             .Lskipzeroterms
+.Lprocessnonzeroterm:
+    ldr             r12, .Ltwo_over_pi_ptr
+    add             r0, sp, #24
+    add             r1, sp, #8
+.LPICtwo_over_pi0:
+    add             lr, pc, r12
+    mov             r12, #2
+    str             lr, [sp, #4]
+    str             r12, [sp]
+    bl              __kernel_rem_pio2
+    cmp             r4, #0
+    vldr            d0, [sp, #8]
+    blt             .Lhandlenegativexalso
+    vldr            d1, [sp, #16]
+    b               .Lselectregion
+.Lxnearpio2:
+    vldr            d17, .Lpio2_2
+    vsub.f64        d16, d16, d17
+    vldr            d17, .Lpio2_2t
+    b               .Lfinalizesmallxremainder
+.Lsmallxisnegative:
+    vldr            d1, .Lpio2_1
+    cmp             r3, r2
+    vadd.f64        d16, d0, d1
+    beq             .Lxnearnegpio2
+    vldr            d17, .Lpio2_1t
+.Lfinalizesmallnegxremainder:
+    vadd.f64        d0, d16, d17
+    vsub.f64        d16, d16, d0
+    vstr            d0, [sp, #8]
+    vadd.f64        d1, d16, d17
+    vstr            d1, [sp, #16]
+.Lnmod3is0:
+    bl              __kernel_cos
+    vneg.f64        d0, d0
+    b               .Lleave_sin
+.Lnmod3is2:
+    mov             r0, #1
+    bl              __kernel_sin
+    vneg.f64        d0, d0
+    b               .Lleave_sin
+.Lcomputeremainder:
+    vsub.f64        d0, d16, d18
+    asr             r1, r3, #20
+    vmov            r2, r3, d0
+    ubfx            r3, r3, #20, #11
+    rsb             r3, r3, r1
+    vstr            d0, [sp, #8]
+    cmp             r3, #16
+    ble             .Lfinishcomputingremainder
+    vldr            d18, .Lpio2_2
+    vmul.f64        d20, d17, d18
+    vsub.f64        d19, d16, d20
+    vsub.f64        d16, d16, d19
+    vsub.f64        d18, d16, d20
+    vldr            d16, .Lpio2_2t
+    vnmls.f64       d18, d17, d16
+    vsub.f64        d0, d19, d18
+    vmov            r2, r3, d0
+    ubfx            r3, r3, #20, #11
+    rsb             r1, r3, r1
+    vstr            d0, [sp, #8]
+    cmp             r1, #49
+    ble             .Lfinishseconditeration
+    vldr            d5, .Lpio2_3
+    vmul.f64        d20, d17, d5
+    vsub.f64        d16, d19, d20
+    vsub.f64        d4, d19, d16
+    vldr            d19, .Lpio2_3t
+    vsub.f64        d18, d4, d20
+    vnmls.f64       d18, d17, d19
+    b               .Lfinishthirditeration
+.Lhandlenegativex:
+    vneg.f64        d0, d0
+    rsb             r0, r0, #0
+    vneg.f64        d1, d1
+    vstr            d0, [sp, #8]
+    vstr            d1, [sp, #16]
+    b               .Lselectregion
+.Lfinishseconditeration:
+    vmov            d16, d19
+    b               .Lfinishcomputingremainder
+.Lxnearnegpio2:
+    vldr            d0, .Lpio2_2
+    vldr            d17, .Lpio2_2t
+    vadd.f64        d16, d16, d0
+    b               .Lfinalizesmallnegxremainder
+.Lhandlenegativexalso:
+    vldr            d6, [sp, #16]
+    vneg.f64        d0, d0
+    rsb             r0, r0, #0
+    vneg.f64        d1, d6
+    vstr            d0, [sp, #8]
+    vstr            d1, [sp, #16]
+    b               .Lselectregion
+
+.align 3
+.Lpio2_1:
+    .word           0x54400000, 0x3ff921fb
+.Lpio2_1t:
+    .word           0x1a626331, 0x3dd0b461
+.Linvpio2:
+    .word           0x6dc9c883, 0x3fe45f30
+.Ltwo24:
+    .word           0x00000000, 0x41700000
+.Lpio2_2:
+    .word           0x1a600000, 0x3dd0b461
+.Lpio2_2t:
+    .word           0x2e037073, 0x3ba3198a
+.Lpio2_3:
+    .word           0x2e000000, 0x3ba3198a
+.Lpio2_3t:
+    .word           0x252049c1, 0x397b839a
+.Lnpio2_hw_ptr:
+    .word           .Lnpio2_hw-(.LPICnpio2_hw0+8)
+.Ltwo_over_pi_ptr:
+    .word           .Ltwo_over_pi-(.LPICtwo_over_pi0+8)
+END(sin)
+
+#if (__LDBL_MANT_DIG__ == 53)
+.weak sinl
+.equ  sinl, sin
+#else
+ENTRY(sinl)
+END(sinl)
+#endif
+
+    .section        .rodata.npio2_hw,"a",%progbits
+    .align          2
+.Lnpio2_hw = . + 0
+    .type           npio2_hw, %object
+    .size           npio2_hw, 128
+npio2_hw:
+    .word           0x3ff921fb
+    .word           0x400921fb
+    .word           0x4012d97c
+    .word           0x401921fb
+    .word           0x401f6a7a
+    .word           0x4022d97c
+    .word           0x4025fdbb
+    .word           0x402921fb
+    .word           0x402c463a
+    .word           0x402f6a7a
+    .word           0x4031475c
+    .word           0x4032d97c
+    .word           0x40346b9c
+    .word           0x4035fdbb
+    .word           0x40378fdb
+    .word           0x403921fb
+    .word           0x403ab41b
+    .word           0x403c463a
+    .word           0x403dd85a
+    .word           0x403f6a7a
+    .word           0x40407e4c
+    .word           0x4041475c
+    .word           0x4042106c
+    .word           0x4042d97c
+    .word           0x4043a28c
+    .word           0x40446b9c
+    .word           0x404534ac
+    .word           0x4045fdbb
+    .word           0x4046c6cb
+    .word           0x40478fdb
+    .word           0x404858eb
+    .word           0x404921fb
+
+    .section        .rodata.two_over_pi,"a",%progbits
+    .align          2
+.Ltwo_over_pi = . + 0
+    .type           two_over_pi, %object
+    .size           two_over_pi, 264
+two_over_pi:
+    .word           0x00a2f983
+    .word           0x006e4e44
+    .word           0x001529fc
+    .word           0x002757d1
+    .word           0x00f534dd
+    .word           0x00c0db62
+    .word           0x0095993c
+    .word           0x00439041
+    .word           0x00fe5163
+    .word           0x00abdebb
+    .word           0x00c561b7
+    .word           0x00246e3a
+    .word           0x00424dd2
+    .word           0x00e00649
+    .word           0x002eea09
+    .word           0x00d1921c
+    .word           0x00fe1deb
+    .word           0x001cb129
+    .word           0x00a73ee8
+    .word           0x008235f5
+    .word           0x002ebb44
+    .word           0x0084e99c
+    .word           0x007026b4
+    .word           0x005f7e41
+    .word           0x003991d6
+    .word           0x00398353
+    .word           0x0039f49c
+    .word           0x00845f8b
+    .word           0x00bdf928
+    .word           0x003b1ff8
+    .word           0x0097ffde
+    .word           0x0005980f
+    .word           0x00ef2f11
+    .word           0x008b5a0a
+    .word           0x006d1f6d
+    .word           0x00367ecf
+    .word           0x0027cb09
+    .word           0x00b74f46
+    .word           0x003f669e
+    .word           0x005fea2d
+    .word           0x007527ba
+    .word           0x00c7ebe5
+    .word           0x00f17b3d
+    .word           0x000739f7
+    .word           0x008a5292
+    .word           0x00ea6bfb
+    .word           0x005fb11f
+    .word           0x008d5d08
+    .word           0x00560330
+    .word           0x0046fc7b
+    .word           0x006babf0
+    .word           0x00cfbc20
+    .word           0x009af436
+    .word           0x001da9e3
+    .word           0x0091615e
+    .word           0x00e61b08
+    .word           0x00659985
+    .word           0x005f14a0
+    .word           0x0068408d
+    .word           0x00ffd880
+    .word           0x004d7327
+    .word           0x00310606
+    .word           0x001556ca
+    .word           0x0073a8c9
+    .word           0x0060e27b
+    .word           0x00c08c6b
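
s_sin.S shares the reduction scaffolding of s_cos.S: .Lpio2_1/.Lpio2_2/.Lpio2_3 and their *t tails split pi/2 into roughly 33-bit chunks so that x - n*pi/2 can be formed without rounding error, and the exponent checks at .Lcomputeremainder (cmp ... #16, cmp ... #49) decide how many refinement stages are needed. A hedged C sketch of the first two stages in the style of fdlibm's e_rem_pio2.c, with both stages run unconditionally for brevity; the constants are the ones in the .Lpio2_* tables above:

    static const double pio2_1  = 1.57079632673412561417e+00; /* 0x3ff921fb,54400000 */
    static const double pio2_1t = 6.07710050650619224932e-11; /* 0x3dd0b461,1a626331 */
    static const double pio2_2  = 6.07710050630396597660e-11; /* 0x3dd0b461,1a600000 */
    static const double pio2_2t = 2.02226624879595063154e-21; /* 0x3ba3198a,2e037073 */

    /* fn is the nearest integer to x*2/pi; returns the head of x - fn*pi/2
     * and stores the tail in *tail. */
    double rem_pio2_sketch(double x, double fn, double *tail)
    {
        double r = x - fn * pio2_1;  /* exact: pio2_1 has only 33 mantissa bits  */
        double w = fn * pio2_1t;
        double t = r;                /* second stage, needed when r - w loses    */
        w = fn * pio2_2;             /* too many leading bits (the cmp #16 path) */
        r = t - w;
        w = fn * pio2_2t - ((t - r) - w);
        double head = r - w;
        *tail = (r - head) - w;
        return head;
    }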
diff --git a/libm/arm64/e_pow64.S b/libm/arm64/e_pow64.S
new file mode 100644
index 0000000..2a30c4a
--- /dev/null
+++ b/libm/arm64/e_pow64.S
@@ -0,0 +1,450 @@
+/* Copyright (c) 2009-2014 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its contributors may
+ *       be used to endorse or promote products derived from this software
+ *       without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <private/bionic_asm.h>
+//#define NO_FUSED_MULTIPLY
+
+#define KRAIT_NO_AAPCS_VFP_MODE
+
+ENTRY(pow)
+#if defined(KRAIT_NO_AAPCS_VFP_MODE)
+	// ARM ABI has inputs coming in via d registers; let's copy them to x registers
+	fmov	x0, d0
+	fmov	x1, d1
+#endif
+	mov w12, #0x40100000    // high word of 64-bit 4.0
+
+	// pre-staged bp values
+	ldr	d5, .LbpA
+	ldr	d3, .LbpB
+
+	// load two fifths into constant term in case we need it due to offsets
+	ldr	d6, .Ltwofifths
+
+	// bp is initially 1.0, may adjust later based on x value
+	fmov	d4,  #1.0
+
+	// twoto1o5 = 2^(1/5) (input bracketing)
+	ldr	x4, .Ltwoto1o5
+
+	// twoto3o5 = 2^(3/5) (input bracketing)
+	ldr	x5, .Ltwoto3o5
+
+	// extract xmantissa
+	bic	x6, x0, #0xFFF0000000000000
+
+	// begin preparing a mask for normalization (high 32-bit mask)
+	movi	d31, #0xFFFFFFFF00000000
+
+	// double_1 = (double) 1.0
+	fmov	d28, #1.0
+
+	cmp	x6, x4
+
+	shl	d30, d31, #20	// d30 can mask just sign/exp bits
+	ushr	d29, d31, #63	// mask has only bit 1 set
+
+	adr	x10, .LliteralTable		// x10->k4 in literal table (below)
+
+	bhi	.Lxgt2to1over5
+	// zero out lg2 constant term if we don't offset our input
+	fsub	d6, d6, d6
+	b	.Lxle2to1over5
+
+.Lxgt2to1over5:
+	// if normalized x > 2^(1/5), bp = 1 + (2^(2/5)-1) = 2^(2/5)
+	fadd	d4, d4, d5
+
+.Lxle2to1over5:
+	ldr	d5, .Lln2			// d5 = ln2 = 0.69314718056
+
+	cmp	x6, x5				// non-normalized compare
+
+//@@@ X Value Normalization @@@@
+
+	// ss = abs(x) 2^(-1024)
+	bic	v16.8B, v0.8B, v30.8B		// mantissa of x into v16.8B (aka d16)
+
+	// N = (floor(log2(x)) + 0x3ff) * 2^52
+	and	v2.8B, v0.8B, v30.8B		// exponent of x (d0) into v2.8B aka d2
+
+	bls	.Lxle2to3over5			// branch not taken if (x6 > x5)
+	// if normalized x > 2^(3/5), bp = 2^(2/5) + (2^(4/5) - 2^(2/5)) = 2^(4/5)
+	fadd	d4, d4, d3			// d4 = 2^(2/5) + (2^(4/5) - 2^(2/5)) = 2^(4/5)
+	fadd	d6, d6, d6			// d6 = 2/5 + 2/5 = 4/5 or 0 (see logic above)
+
+.Lxle2to3over5:
+
+	lsr	x2, x0, #32			// Need just high word of x...x2 can take it
+	cmp	w2, w12				// Compare x to 4.0 (high word only)
+	lsr	x3, x1, #32			// Need just high word of y...x3 can take it.
+	ccmp	w3, w12, #2, ls			// If x < 4.0, compare y to 4.0 (high word)
+	bic	w12, w12, #0xFFFF0000		// Begin building 0.5 (0x3fe00000) in w12 for the next compare
+	orr	w12, w12, #0x3fe00000
+	ccmp	w12, w2, #2, ls			// If y < 4, compare 0.5 to x
+
+	// load log2 polynomial series constants
+	ldp	d24, d25, [x10, #0]
+	ldp	d26, d27, [x10, #16]
+
+	// s = abs(x) 2^(-floor(log2(x))) (normalize abs(x) to around 1)
+	orr	v16.8B, v16.8B, v28.8B
+
+//@@@ 3/2 (Log(bp(1+s)/(1-s))) input computation (s = (x-bp)/(x+bp)) @@@@
+
+	fsub	d19, d16, d4		// take normalized x and subtract 2^(4/5) from it
+	fadd	d20, d16, d4
+	bhi	.LuseFullImpl			// |x| < 0.5 or x > 4 or y > 4
+
+	// s = (x-1)/(x+1)
+	fdiv	d16, d19, d20
+
+	// load 2/(3log2) into lg2coeff
+	ldr	d21, .Ltwooverthreeln2
+
+	// N = floor(log2(x)) * 2^52
+	sub	d2, d2, d28
+
+//@@@ 3/2 (Log(bp(1+s)/(1-s))) polynomial series @@@@
+
+	// ss2 = ((x-bp)/(x+bp))^2
+	fmul	d17, d16, d16
+
+	// ylg2x = 3.0
+	fmov	d0, #3.0
+	fmul	d18, d17, d17
+
+	// todo: useful later for two-way clamp
+	fmul	d21, d21, d1
+
+	// N = floor(log2(x))
+	sshr	d2, d2, #52
+	// k3 = ss^2 * L4 + L3
+#ifdef NO_FUSED_MULTIPLY
+	fmul	d3, d17, v24.2D[0]
+	fadd	d25, d25, d3
+
+	// k1 = ss^2 * L2 + L1
+	fmul	d3, d17, v26.2D[0]
+	fadd	d27, d27, d3
+#else
+	fmla	d25, d17, v24.2D[0]
+
+	// k1 = ss^2 * L2 + L1
+	fmla	d27, d17, v26.2D[0]
+#endif
+
+	// scale ss by 2/(3 ln 2)
+	fmul	d21, d16, d21
+
+	// ylg2x = 3.0 + s^2
+	fadd	d0, d0, d17
+
+	fmov	x2, d2
+	scvtf	d3, w2		// Low-order 32-bit integer half of d2 to fp64
+
+	// k1 = s^4 (s^2 L4 + L3) + s^2 L2 + L1
+#ifdef NO_FUSED_MULTIPLY
+	fmul	d31, d18, v25.2D[0]
+	fadd	d27, d27, d31
+#else
+	fmla	d27, d18, v25.2D[0]
+#endif
+	// add in constant term
+	fadd	d3, d3, d6
+
+	// ylg2x = 3.0 + s^2 + s^4 (s^4 (s^2 L4 + L3) + s^2 L2 + L1)
+#ifdef NO_FUSED_MULTIPLY
+	fmul	d31, d18, v27.2D[0]
+	fadd	d0, d0, d31
+#else
+	fmla	d0, d18, v27.2D[0]
+#endif
+	// ylg2x = y 2 s / (3 ln(2)) (3.0 + s^2 + s^4 (s^4(s^2 L4 + L3) + s^2 L2 + L1))
+	fmul	d0, d21, d0
+
+//@@@ Compute input to Exp(s) (s = y(n + log2(x)) - (floor(8 yn + 1)/8 + floor(8 ylog2(x) + 1)/8)) @@@@@
+
+	// mask to extract bit 1 (2^-2 from our fixed-point representation)
+	shl	d4, d29, #1
+
+	// double_n = y * n
+	fmul	d3, d3, d1
+
+	// Load 2^(1/4) for later computations
+	ldr	d6, .Ltwoto1o4
+
+	// either add or subtract one based on the sign of double_n and ylg2x
+	sshr	d16, d0, #62
+	sshr	d19, d3, #62
+
+	// move unmodified y*lg2x into temp space
+	fmov	d17, d0
+
+	// compute floor(8 y * n + 1)/8
+	// and floor(8 y (log2(x)) + 1)/8
+	fcvtzs	w2, d0, #3	// no instruction exists to use s0 as a direct target
+	fmov	s0, w2		// run our conversion into w2, then mov it to compensate
+	// move unmodified y*n into temp space
+	fmov	d18, d3
+	fcvtzs	w2, d3, #3
+	fmov	s3, w2
+
+	// load exp polynomial series constants
+	ldp	d20, d21, [x10, #32]
+	ldp	d22, d23, [x10, #48]
+	ldp	d24, d25, [x10, #64]
+	ldp	d26, d27, [x10, #80]
+
+	// mask to extract bit 2 (2^-1 from our fixed-point representation)
+	shl	d1, d29, #2
+
+	// make rounding offsets either 1 or -1 instead of 0 or -2
+	orr	v16.8B, v16.8B, v29.8B
+	orr	v19.8B, v19.8B, v29.8B
+
+	// round up to the nearest 1/8th
+	add	d0, d0, d16
+	add	d3, d3, d19
+
+	// clear out round-up bit for y log2(x)
+	bic	v0.8B, v0.8B, v29.8B
+	// clear out round-up bit for yn
+	bic	v3.8B, v3.8B, v29.8B
+	// add together the (fixed precision) rounded parts
+	add	d31, d3, d0
+	// turn int_n into a double with value 2^int_n
+	shl	d2, d31, #49
+	// compute masks for 2^(1/4) and 2^(1/2) fixups for fractional part of fixed-precision rounded values:
+	and	v4.8B, v4.8B, v31.8B
+	and	v1.8B, v1.8B, v31.8B
+
+	// convert back into floating point, d3 now holds (double) floor(8 y * n + 1)/8
+	//                                   d0 now holds (double) floor(8 y * log2(x) + 1)/8
+	fmov	w2, s0
+	scvtf	d0, w2, #3
+	fmov	w2, s3
+	scvtf	d3, w2, #3
+
+	// put the 2 bit (0.5) through the roof of twoto1o2mask (make it 0x0 or 0xffffffffffffffff)
+	uqshl	d1, d1, #62
+
+	// put the 1 bit (0.25) through the roof of twoto1o4mask (make it 0x0 or 0xffffffffffffffff)
+	uqshl	d4, d4, #63
+
+	// center y*log2(x) fractional part between -0.125 and 0.125 by subtracting (double) floor(8 y * log2(x) + 1)/8
+	fsub	d17, d17, d0
+	// center y*n fractional part between -0.125 and 0.125 by subtracting (double) floor(8 y * n + 1)/8
+	fsub	d18, d18, d3
+
+	// Add fractional parts of yn and y log2(x) together
+	fadd	d16, d17, d18
+
+	// Result = 1.0 (offset for exp(s) series)
+	fmov	d0, #1.0
+
+	// multiply fractional part of y * log2(x) by ln(2)
+	fmul	d16, d5, d16
+
+//@@@ 10th order polynomial series for Exp(s) @@@@
+
+	// ss2 = (ss)^2
+	fmul	d17, d16, d16
+
+	// twoto1o2mask = twoto1o2mask & twoto1o4
+	and	v1.8B, v1.8B, v6.8B
+	// twoto1o4mask = twoto1o4mask & twoto1o4
+	and	v4.8B, v4.8B, v6.8B
+
+	// Result = 1.0 + ss
+	fadd	d0, d0, d16
+
+	// k7 = ss k8 + k7
+#ifdef NO_FUSED_MULTIPLY
+	fmul	d31, d16, v20.2D[0]
+	fadd	d21, d21, d31
+#else
+	fmla	d21, d16, v20.2D[0]
+#endif
+	// ss4 = (ss*ss) * (ss*ss)
+	fmul	d18, d17, d17
+
+	// twoto1o2mask = twoto1o2mask | (double) 1.0 - results in either 1.0 or 2^(1/4) in twoto1o2mask
+	orr	v1.8B, v1.8B, v28.8B
+	// twoto1o4mask = twoto1o4mask | (double) 1.0 - results in either 1.0 or 2^(1/4) in twoto1o4mask
+	orr	v4.8B, v4.8B, v28.8B
+
+	// sign could be set up here, but for now expadjustment = 1.0
+	fmov	d7, #1.0
+
+	// ss3 = (ss*ss) * ss
+	fmul	d19, d17, d16
+
+	// k0 = 1/2 (first non-unity coefficient)
+	fmov	d28, #0.5
+
+	// Mask out non-exponent bits to make sure we have just 2^int_n
+	and	v2.8B, v2.8B, v30.8B
+
+	// square twoto1o2mask to get 1.0 or 2^(1/2)
+	fmul	d1, d1, d1
+
+	// multiply twoto1o4mask into the exponent output adjustment value
+	fmul	d7, d7, d4
+
+#ifdef NO_FUSED_MULTIPLY
+	// k5 = ss k6 + k5
+	fmul	d31, d16, v22.2D[0]
+	fadd	d23, d23, d31
+
+	// k3 = ss k4 + k3
+	fmul	d31, d16, v24.2D[0]
+	fadd	d25, d25, d31
+
+	// k1 = ss k2 + k1
+	fmul	d31, d16, v26.2D[0]
+	fadd	d27, d27, d31
+#else
+	// k5 = ss k6 + k5
+	fmla	d23, d16, v22.2D[0]
+
+	// k3 = ss k4 + k3
+	fmla	d25, d16, v24.2D[0]
+
+	// k1 = ss k2 + k1
+	fmla	d27, d16, v26.2D[0]
+#endif
+	// multiply twoto1o2mask into exponent output adjustment value
+	fmul	d7, d7, d1
+#ifdef NO_FUSED_MULTIPLY
+	// k5 = ss^2 ( ss k8 + k7 ) + ss k6 + k5
+	fmul	d31, d17, v21.2D[0]
+	fadd	d23, d23, d31
+
+	// k1 = ss^2 ( ss k4 + k3 ) + ss k2 + k1
+	fmul	d31, d17, v25.2D[0]
+	fadd	d27, d27, d31
+
+	// Result = 1.0 + ss + 1/2 ss^2
+	fmul	d31, d17, v28.2D[0]
+	fadd	d0, d0, d31
+#else
+	// k5 = ss^2 ( ss k8 + k7 ) + ss k6 + k5
+	fmla	d23, d17, v21.2D[0]
+
+	// k1 = ss^2 ( ss k4 + k3 ) + ss k2 + k1
+	fmla	d27, d17, v25.2D[0]
+
+	// Result = 1.0 + ss + 1/2 ss^2
+	fmla	d0, d17, v28.2D[0]
+#endif
+	// Adjust int_n so that it's a double precision value that can be multiplied by Result
+	add	d7, d2, d7
+#ifdef NO_FUSED_MULTIPLY
+	// k1 = ss^4 ( ss^2 ( ss k8 + k7 ) + ss k6 + k5 ) + ss^2 ( ss k4 + k3 ) + ss k2 + k1
+	fmul	d31, d18, v23.2D[0]
+	fadd	d27, d27, d31
+
+	// Result = 1.0 + ss + 1/2 ss^2 + ss^3 ( ss^4 ( ss^2 ( ss k8 + k7 ) + ss k6 + k5 ) + ss^2 ( ss k4 + k3 ) + ss k2 + k1 )
+	fmul	d31, d19, v27.2D[0]
+	fadd	d0, d0, d31
+#else
+	// k1 = ss^4 ( ss^2 ( ss k8 + k7 ) + ss k6 + k5 ) + ss^2 ( ss k4 + k3 ) + ss k2 + k1
+	fmla	d27, d18, v23.2D[0]
+
+	// Result = 1.0 + ss + 1/2 ss^2 + ss^3 ( ss^4 ( ss^2 ( ss k8 + k7 ) + ss k6 + k5 ) + ss^2 ( ss k4 + k3 ) + ss k2 + k1 )
+	fmla	d0, d19, v27.2D[0]
+#endif
+	// multiply by adjustment (sign*(rounding ? sqrt(2) : 1) * 2^int_n)
+	fmul	d0, d7, d0
+
+.LleavePow:
+#if defined(KRAIT_NO_AAPCS_VFP_MODE)
+	// return Result (FP)
+	// fmov	x0, d0
+#endif
+.LleavePowDirect:
+	// leave directly returning whatever is in d0
+	ret
+.LuseFullImpl:
+	fmov	d0, x0
+	fmov	d1, x1
+	b	__full_ieee754_pow
+
+.align 6
+.LliteralTable:
+// Least-squares tuned constants for 11th order log2((1+s)/(1-s)):
+.LL4: // ~3/11
+    .long       0x53a79915, 0x3fd1b108
+.LL3: // ~1/3
+    .long       0x9ca0567a, 0x3fd554fa
+.LL2: // ~3/7
+    .long       0x1408e660, 0x3fdb6db7
+.LL1: // ~3/5
+    .long       0x332D4313, 0x3fe33333
+
+// Least-squares tuned constants for 10th order exp(s):
+.LE10: // ~1/3628800
+    .long       0x25c7ba0a, 0x3e92819b
+.LE9: // ~1/362880
+    .long       0x9499b49c, 0x3ec72294
+.LE8: // ~1/40320
+    .long       0xabb79d95, 0x3efa019f
+.LE7: // ~1/5040
+    .long       0x8723aeaa, 0x3f2a019f
+.LE6: // ~1/720
+    .long       0x16c76a94, 0x3f56c16c
+.LE5: // ~1/120
+    .long       0x11185da8, 0x3f811111
+.LE4: // ~1/24
+    .long       0x5555551c, 0x3fa55555
+.LE3: // ~1/6
+    .long       0x555554db, 0x3fc55555
+
+.LbpA: // (2^(2/5) - 1)
+    .long       0x4ee54db1, 0x3fd472d1
+
+.LbpB: // (2^(4/5) - 2^(2/5))
+    .long       0x1c8a36cf, 0x3fdafb62
+
+.Ltwofifths: // 2/5
+    .long       0x9999999a, 0x3fd99999
+
+.Ltwooverthreeln2:
+    .long       0xDC3A03FD, 0x3FEEC709
+
+.Ltwoto1o5:	// 2^(1/5) exponent 3ff stripped for non-normalized compares
+    .long	0x86BAE675, 0x00026111
+
+.Ltwoto3o5:	// 2^(3/5) exponent 3ff stripped for non-normalized compares
+    .long	0x03B2AE5C, 0x00084060
+
+.Lln2: // ln(2)
+    .long       0xFEFA39EF, 0x3FE62E42
+
+.Ltwoto1o4: // 2^1/4
+    .long       0x0a31b715, 0x3ff306fe
+END(pow)
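
Structurally, the fast path above evaluates pow(x, y) as 2^(y*log2(x)): log2(x) comes from the odd series in s = (x-bp)/(x+bp), with bp picked from {1, 2^(2/5), 2^(4/5)} (the .LbpA/.LbpB adjustments) to keep |s| small; the product y*log2(x) is then split into an integer exponent, an eighth-step fixup (the 2^(1/4)/2^(1/2) masks), and a small residual for the 10th-order exp polynomial. A structural C sketch only, with libm's log2/exp2/ldexp standing in for the polynomial and mask machinery:

    #include <math.h>

    /* pow_sketch is illustrative; the assembly bails to __full_ieee754_pow
     * outside roughly 0.5 <= x <= 4.0, y <= 4.0 (the .LuseFullImpl path). */
    double pow_sketch(double x, double y)
    {
        double t = y * log2(x);        /* series in s = (x-bp)/(x+bp)       */
        double n = floor(t + 0.5);     /* integer part -> result exponent   */
        double f = t - n;              /* |f| <= 1/2, fed to the exp series */
        return ldexp(exp2(f), (int)n); /* assemble 2^n * 2^f                */
    }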
diff --git a/libm/upstream-freebsd/lib/msun/src/e_pow.c b/libm/upstream-freebsd/lib/msun/src/e_pow.c
index 7607a4a..e152628 100644
--- a/libm/upstream-freebsd/lib/msun/src/e_pow.c
+++ b/libm/upstream-freebsd/lib/msun/src/e_pow.c
@@ -94,7 +94,11 @@
 ivln2_l  =  1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
 
 double
+#if defined(KRAIT_NEON_OPTIMIZATION) || defined(QCOM_NEON_OPTIMIZATION)
+__full_ieee754_pow(double x, double y)
+#else
 __ieee754_pow(double x, double y)
+#endif
 {
 	double z,ax,z_h,z_l,p_h,p_l;
 	double y1,t1,t2,r,s,t,u,v,w;
@@ -106,6 +110,35 @@
 	EXTRACT_WORDS(hy,ly,y);
 	ix = hx&0x7fffffff;  iy = hy&0x7fffffff;
 
+#if defined(KRAIT_NEON_OPTIMIZATION) || defined(QCOM_NEON_OPTIMIZATION)
+
+    if (ly == 0) {
+        if (hy == ly) {
+            /* y==0.0, x**0 = 1 */
+            return one;
+        }
+        else if (iy > 0x7ff00000) {
+            /* y is NaN, return x+y (NaN) */
+            return x+y;
+        }
+    }
+    else if (iy >= 0x7ff00000) {
+        /* y is NaN, return x+y (NaN) */
+        return x+y;
+    }
+
+    if (lx == 0) {
+        if (ix > 0x7ff00000) {
+            /* x is NaN, return x+y (NaN) */
+            return x+y;
+        }
+    }
+    else if (ix >= 0x7ff00000) {
+        /* x is NaN, return x+y (NaN) */
+        return x+y;
+    }
+
+#else
     /* y==zero: x**0 = 1 */
 	if((iy|ly)==0) return one; 	
 
@@ -116,7 +149,7 @@
 	if(ix > 0x7ff00000 || ((ix==0x7ff00000)&&(lx!=0)) ||
 	   iy > 0x7ff00000 || ((iy==0x7ff00000)&&(ly!=0))) 
 		return (x+0.0)+(y+0.0);
-
+#endif
     /* determine if y is an odd int when x < 0
      * yisint = 0	... y is not an integer
      * yisint = 1	... y is an odd int
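
The rearranged special-case block tests each 32-bit half of x and y separately instead of OR-ing the words together, so the common case of finite operands falls through with fewer combined operations. For reference, the NaN test it encodes is the usual word-level IEEE-754 one; a small self-contained restatement (is_nan_words is an illustrative name):

    #include <stdint.h>

    /* NaN iff the biased exponent is all ones and the mantissa is nonzero:
     * masked high word above 0x7ff00000, or equal to it with a nonzero low word. */
    static int is_nan_words(uint32_t hx, uint32_t lx)
    {
        uint32_t ix = hx & 0x7fffffff;
        return ix > 0x7ff00000 || (ix == 0x7ff00000 && lx != 0);
    }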
diff --git a/libm/upstream-freebsd/lib/msun/src/k_cos.c b/libm/upstream-freebsd/lib/msun/src/k_cos.c
index c4702e6..6037e0d 100644
--- a/libm/upstream-freebsd/lib/msun/src/k_cos.c
+++ b/libm/upstream-freebsd/lib/msun/src/k_cos.c
@@ -68,6 +68,17 @@
 double
 __kernel_cos(double x, double y)
 {
+#if defined(KRAIT_NEON_OPTIMIZATION) || defined(QCOM_NEON_OPTIMIZATION)
+	double hz,z,zz,r,w,k;
+
+	z  = x*x;
+	zz = z*z;
+	k = x*y;
+	hz = (float)0.5*z;
+	r  = z*(z*(C1+z*(C2+z*((C3+z*C4)+zz*(C5+z*C6)))));
+	w  = one-hz;
+	return w + (((one-w)-hz) + (r-k));
+#else
 	double hz,z,r,w;
 
 	z  = x*x;
@@ -76,4 +87,5 @@
 	hz = 0.5*z;
 	w  = one-hz;
 	return w + (((one-w)-hz) + (z*r-x*y));
+#endif
 }
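
The QCOM variant of __kernel_cos is algebraically the upstream polynomial: the extra factor of z is folded into r (so the return uses r - k where upstream uses z*r - x*y), and the tail is regrouped around zz = z*z so that (C3+z*C4) and (C5+z*C6) form independent multiply-add chains rather than one serial Horner recurrence. The two groupings agree up to floating-point rounding, as this comparison with the upstream coefficients shows:

    /* Coefficients from upstream k_cos.c. */
    static const double C1 =  4.16666666666666019037e-02;
    static const double C2 = -1.38888888888741095749e-03;
    static const double C3 =  2.48015872894767294178e-05;
    static const double C4 = -2.75573143513906633035e-07;
    static const double C5 =  2.08757232129817482790e-09;
    static const double C6 = -1.13596475577881948265e-11;

    static double tail_serial(double z)   /* upstream: one serial chain */
    {
        return z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
    }

    static double tail_split(double z)    /* QCOM: two chains joined by zz */
    {
        double zz = z*z;
        return z*(C1+z*(C2+z*((C3+z*C4)+zz*(C5+z*C6))));
    }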
diff --git a/libm/upstream-freebsd/lib/msun/src/k_sin.c b/libm/upstream-freebsd/lib/msun/src/k_sin.c
index 12ee8c1..afd2da8 100644
--- a/libm/upstream-freebsd/lib/msun/src/k_sin.c
+++ b/libm/upstream-freebsd/lib/msun/src/k_sin.c
@@ -59,6 +59,16 @@
 double
 __kernel_sin(double x, double y, int iy)
 {
+#if defined(KRAIT_NEON_OPTIMIZATION) || defined(QCOM_NEON_OPTIMIZATION)
+	double z,zz,r,v;
+
+	z	=  x*x;
+	zz  =  z*z;
+	v	=  z*x;
+	r	=  S2+z*((S3+z*S4)+zz*(S5+z*S6));
+	if(iy==0) return x+v*(S1+z*r);
+	else      return x-((z*(half*y-v*r)-y)-v*S1);
+#else
 	double z,r,v,w;
 
 	z	=  x*x;
@@ -67,4 +77,5 @@
 	v	=  z*x;
 	if(iy==0) return x+v*(S1+z*r);
 	else      return x-((z*(half*y-v*r)-y)-v*S1);
+#endif
 }
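
The k_sin change is the same regrouping trick around zz = z*z. The iy flag keeps its fdlibm meaning: it says whether the tail y of the reduced argument carries information, which is why the assembly above loads r0 with 0 or 1 before each bl __kernel_sin. A sketch of the two call shapes (wrapper names are illustrative):

    extern double __kernel_sin(double, double, int);

    /* iy == 0: |x| < pi/4 arrives unreduced and the tail is exactly zero.    */
    /* iy == 1: (r, w) is the head/tail pair produced by argument reduction.  */
    double sin_small(double x)             { return __kernel_sin(x, 0.0, 0); }
    double sin_reduced(double r, double w) { return __kernel_sin(r, w,  1); }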
diff --git a/libm/upstream-freebsd/lib/msun/src/math_private.h b/libm/upstream-freebsd/lib/msun/src/math_private.h
index 8af2c65..77520d7 100644
--- a/libm/upstream-freebsd/lib/msun/src/math_private.h
+++ b/libm/upstream-freebsd/lib/msun/src/math_private.h
@@ -723,6 +723,16 @@
 #define	__ieee754_remainderf remainderf
 #define	__ieee754_scalbf scalbf
 
+#if defined(KRAIT_NEON_OPTIMIZATION) || defined(QCOM_NEON_OPTIMIZATION)
+int	__kernel_rem_pio2(double*,double*,int,int,int) __attribute__((pcs("aapcs-vfp")));
+double	__full_ieee754_pow(double,double);
+#ifndef INLINE_REM_PIO2
+int	__ieee754_rem_pio2(double,double*) __attribute__((pcs("aapcs-vfp")));
+#endif
+double	__kernel_sin(double,double,int) __attribute__((pcs("aapcs-vfp")));
+double	__kernel_cos(double,double) __attribute__((pcs("aapcs-vfp")));
+double	__kernel_tan(double,double,int) __attribute__((pcs("aapcs-vfp")));
+#else
 /* fdlibm kernel function */
 int	__kernel_rem_pio2(double*,double*,int,int,int);
 
@@ -733,6 +743,8 @@
 double	__kernel_sin(double,double,int);
 double	__kernel_cos(double,double);
 double	__kernel_tan(double,double,int);
+#endif
+
 double	__ldexp_exp(double,int);
 #ifdef _COMPLEX_H
 double complex __ldexp_cexp(double complex,int);
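
The pcs("aapcs-vfp") attribute on the conditional declarations pins these kernels to the hard-float variant of the ARM procedure call standard, so the hand-written assembly above can pass arguments directly in d0/d1 even when the rest of libm is built -mfloat-abi=softfp. A minimal illustration (ARM GCC/Clang only; scale is an illustrative name):

    /* With pcs("aapcs-vfp"), x and y arrive in d0/d1 and the result is
     * returned in d0, independent of this translation unit's float ABI. */
    double scale(double x, double y) __attribute__((pcs("aapcs-vfp")));

    double scale(double x, double y)
    {
        return x * y;
    }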