merge in lmp-mr1-release history after reset to b0693ec58247c36a7427267783cb756cc055aed8
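
The refactor below folds the cortex-a15 and cortex-a9 strcpy implementations
into a shared string_copy.S that also builds stpcpy (selected via the STPCPY /
STRCPY defines), and drops the obsolete ARM .save unwind directives in favor of
the .cfi_* annotations already present. The copy loops rely on a word-at-a-time
null-byte test; a minimal C sketch of that check, for illustration only (the
helper name has_zero_byte is not part of the patch):

    #include <stdint.h>

    // Nonzero iff any byte of 'word' is zero -- the same test the assembly
    // performs with:
    //     sub  ip, rX, #0x01010101
    //     bic  ip, ip, rX
    //     ands ip, ip, #0x80808080
    static int has_zero_byte(uint32_t word) {
        return ((word - 0x01010101u) & ~word & 0x80808080u) != 0;
    }

Once a word tests positive, the lsls/bcs sequences that follow narrow down which
byte was the terminator so only the valid tail is stored.
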
diff --git a/libc/arch-arm/arm.mk b/libc/arch-arm/arm.mk
index b5ed7f0..cca4ed0 100644
--- a/libc/arch-arm/arm.mk
+++ b/libc/arch-arm/arm.mk
@@ -41,7 +41,6 @@
 
 libc_openbsd_src_files_arm += \
     upstream-openbsd/lib/libc/string/bcopy.c \
-    upstream-openbsd/lib/libc/string/stpcpy.c \
     upstream-openbsd/lib/libc/string/stpncpy.c \
     upstream-openbsd/lib/libc/string/strlcat.c \
     upstream-openbsd/lib/libc/string/strlcpy.c \
diff --git a/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S b/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S
index 36da2d9..a2e9c22 100644
--- a/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S
+++ b/libc/arch-arm/cortex-a15/bionic/__strcat_chk.S
@@ -40,12 +40,10 @@
 ENTRY(__strcat_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
     push    {r4, r5}
-    .save   {r4, r5}
     .cfi_adjust_cfa_offset 8
     .cfi_rel_offset r4, 0
     .cfi_rel_offset r5, 4
@@ -195,9 +193,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcat_chk_failed)
-    .save   {r0, lr}
-    .save   {r4, r5}
-
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S b/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S
index c3e3e14..db76686 100644
--- a/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S
+++ b/libc/arch-arm/cortex-a15/bionic/__strcpy_chk.S
@@ -39,7 +39,6 @@
 ENTRY(__strcpy_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
@@ -161,7 +160,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcpy_chk_failed)
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/cortex-a15/bionic/memcpy.S b/libc/arch-arm/cortex-a15/bionic/memcpy.S
index da4f3dd..410b663 100644
--- a/libc/arch-arm/cortex-a15/bionic/memcpy.S
+++ b/libc/arch-arm/cortex-a15/bionic/memcpy.S
@@ -72,7 +72,6 @@
 ENTRY(memcpy)
         pld     [r1, #64]
         push    {r0, lr}
-        .save   {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -85,7 +84,6 @@
 ENTRY_PRIVATE(__memcpy_chk_fail)
         // Preserve lr for backtrace.
         push    {lr}
-        .save   {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
 
diff --git a/libc/arch-arm/cortex-a15/bionic/memcpy_base.S b/libc/arch-arm/cortex-a15/bionic/memcpy_base.S
index 6ba4931..2a73852 100644
--- a/libc/arch-arm/cortex-a15/bionic/memcpy_base.S
+++ b/libc/arch-arm/cortex-a15/bionic/memcpy_base.S
@@ -54,7 +54,6 @@
  */
 
 ENTRY_PRIVATE(MEMCPY_BASE)
-        .save   {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -172,7 +171,6 @@
 END(MEMCPY_BASE)
 
 ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)
-        .save   {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -181,17 +179,14 @@
         // i.e., not keeping the stack looking like users expect
         // (highest numbered register at highest address).
         strd    r4, r5, [sp, #-8]!
-        .save   {r4, r5}
         .cfi_adjust_cfa_offset 8
         .cfi_rel_offset r4, 0
         .cfi_rel_offset r5, 4
         strd    r6, r7, [sp, #-8]!
-        .save   {r6, r7}
         .cfi_adjust_cfa_offset 8
         .cfi_rel_offset r6, 0
         .cfi_rel_offset r7, 0
         strd    r8, r9, [sp, #-8]!
-        .save   {r8, r9}
         .cfi_adjust_cfa_offset 8
         .cfi_rel_offset r8, 0
         .cfi_rel_offset r9, 4
diff --git a/libc/arch-arm/cortex-a15/bionic/memset.S b/libc/arch-arm/cortex-a15/bionic/memset.S
index 12c68d6..e4a1ec8 100644
--- a/libc/arch-arm/cortex-a15/bionic/memset.S
+++ b/libc/arch-arm/cortex-a15/bionic/memset.S
@@ -44,7 +44,6 @@
         bls         .L_done
 
         // Preserve lr for backtrace.
-        .save       {lr}
         push        {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
@@ -68,7 +67,6 @@
 END(bzero)
 
 ENTRY(memset)
-        .save       {r0}
         stmfd       sp!, {r0}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset r0, 0
diff --git a/libc/arch-arm/cortex-a15/bionic/stpcpy.S b/libc/arch-arm/cortex-a15/bionic/stpcpy.S
new file mode 100644
index 0000000..740523b
--- /dev/null
+++ b/libc/arch-arm/cortex-a15/bionic/stpcpy.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define STPCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a15/bionic/strcmp.S b/libc/arch-arm/cortex-a15/bionic/strcmp.S
index 12da115..acedf0e 100644
--- a/libc/arch-arm/cortex-a15/bionic/strcmp.S
+++ b/libc/arch-arm/cortex-a15/bionic/strcmp.S
@@ -168,7 +168,6 @@
         bne     .L_do_align
 
         /* Fast path.  */
-        .save   {r4-r7}
         init
 
 .L_doubleword_aligned:
diff --git a/libc/arch-arm/cortex-a15/bionic/strcpy.S b/libc/arch-arm/cortex-a15/bionic/strcpy.S
index cb878c4..951face 100644
--- a/libc/arch-arm/cortex-a15/bionic/strcpy.S
+++ b/libc/arch-arm/cortex-a15/bionic/strcpy.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,427 +25,6 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-/*
- * Copyright (c) 2013 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
- *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
 
-#include <private/bionic_asm.h>
-
-    .syntax unified
-
-    .thumb
-    .thumb_func
-
-    .macro m_push
-    push    {r0, r4, r5, lr}
-    .endm // m_push
-
-    .macro m_pop
-    pop     {r0, r4, r5, pc}
-    .endm // m_pop
-
-    .macro m_copy_byte reg, cmd, label
-    ldrb    \reg, [r1], #1
-    strb    \reg, [r0], #1
-    \cmd    \reg, \label
-    .endm // m_copy_byte
-
-ENTRY(strcpy)
-    // For short copies, hard-code checking the first 8 bytes since this
-    // new code doesn't win until after about 8 bytes.
-    m_push
-    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
-
-strcpy_finish:
-    m_pop
-
-strcpy_continue:
-    pld     [r1, #0]
-    ands    r3, r0, #7
-    beq     strcpy_check_src_align
-
-    // Align to a double word (64 bits).
-    rsb     r3, r3, #8
-    lsls    ip, r3, #31
-    beq     strcpy_align_to_32
-
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, strcpy_complete
-
-strcpy_align_to_32:
-    bcc     strcpy_align_to_64
-
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, strcpy_complete
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, strcpy_complete
-
-strcpy_align_to_64:
-    tst     r3, #4
-    beq     strcpy_check_src_align
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-    str     r2, [r0], #4
-
-strcpy_check_src_align:
-    // At this point dst is aligned to a double word, check if src
-    // is also aligned to a double word.
-    ands    r3, r1, #7
-    bne     strcpy_unaligned_copy
-
-    .p2align 2
-strcpy_mainloop:
-    ldrd    r2, r3, [r1], #8
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       strcpy_mainloop
-
-strcpy_complete:
-    m_pop
-
-strcpy_zero_in_first_register:
-    lsls    lr, ip, #17
-    bne     strcpy_copy1byte
-    bcs     strcpy_copy2bytes
-    lsls    ip, ip, #1
-    bne     strcpy_copy3bytes
-
-strcpy_copy4bytes:
-    // Copy 4 bytes to the destiniation.
-    str     r2, [r0]
-    m_pop
-
-strcpy_copy1byte:
-    strb    r2, [r0]
-    m_pop
-
-strcpy_copy2bytes:
-    strh    r2, [r0]
-    m_pop
-
-strcpy_copy3bytes:
-    strh    r2, [r0], #2
-    lsr     r2, #16
-    strb    r2, [r0]
-    m_pop
-
-strcpy_zero_in_second_register:
-    lsls    lr, ip, #17
-    bne     strcpy_copy5bytes
-    bcs     strcpy_copy6bytes
-    lsls    ip, ip, #1
-    bne     strcpy_copy7bytes
-
-    // Copy 8 bytes to the destination.
-    strd    r2, r3, [r0]
-    m_pop
-
-strcpy_copy5bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0]
-    m_pop
-
-strcpy_copy6bytes:
-    str     r2, [r0], #4
-    strh    r3, [r0]
-    m_pop
-
-strcpy_copy7bytes:
-    str     r2, [r0], #4
-    strh    r3, [r0], #2
-    lsr     r3, #16
-    strb    r3, [r0]
-    m_pop
-
-strcpy_unaligned_copy:
-    // Dst is aligned to a double word, while src is at an unknown alignment.
-    // There are 7 different versions of the unaligned copy code
-    // to prevent overreading the src. The mainloop of every single version
-    // will store 64 bits per loop. The difference is how much of src can
-    // be read without potentially crossing a page boundary.
-    tbb     [pc, r3]
-strcpy_unaligned_branchtable:
-    .byte 0
-    .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
-
-    .p2align 2
-    // Can read 7 bytes before possibly crossing a page.
-strcpy_unalign7:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldrb    r3, [r1]
-    cbz     r3, strcpy_unalign7_copy5bytes
-    ldrb    r4, [r1, #1]
-    cbz     r4, strcpy_unalign7_copy6bytes
-    ldrb    r5, [r1, #2]
-    cbz     r5, strcpy_unalign7_copy7bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    lsrs    ip, r3, #24
-    strd    r2, r3, [r0], #8
-    beq     strcpy_unalign_return
-    b       strcpy_unalign7
-
-strcpy_unalign7_copy5bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0]
-strcpy_unalign_return:
-    m_pop
-
-strcpy_unalign7_copy6bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    m_pop
-
-strcpy_unalign7_copy7bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    strb    r5, [r0], #1
-    m_pop
-
-    .p2align 2
-    // Can read 6 bytes before possibly crossing a page.
-strcpy_unalign6:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, strcpy_unalign_copy5bytes
-    ldrb    r5, [r1, #1]
-    cbz     r5, strcpy_unalign_copy6bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r3, #0xff0000
-    beq     strcpy_copy7bytes
-    lsrs    ip, r3, #24
-    strd    r2, r3, [r0], #8
-    beq     strcpy_unalign_return
-    b       strcpy_unalign6
-
-    .p2align 2
-    // Can read 5 bytes before possibly crossing a page.
-strcpy_unalign5:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, strcpy_unalign_copy5bytes
-
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       strcpy_unalign5
-
-strcpy_unalign_copy5bytes:
-    str     r2, [r0], #4
-    strb    r4, [r0]
-    m_pop
-
-strcpy_unalign_copy6bytes:
-    str     r2, [r0], #4
-    strb    r4, [r0], #1
-    strb    r5, [r0]
-    m_pop
-
-    .p2align 2
-    // Can read 4 bytes before possibly crossing a page.
-strcpy_unalign4:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       strcpy_unalign4
-
-    .p2align 2
-    // Can read 3 bytes before possibly crossing a page.
-strcpy_unalign3:
-    ldrb    r2, [r1]
-    cbz     r2, strcpy_unalign3_copy1byte
-    ldrb    r3, [r1, #1]
-    cbz     r3, strcpy_unalign3_copy2bytes
-    ldrb    r4, [r1, #2]
-    cbz     r4, strcpy_unalign3_copy3bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    lsrs    lr, r2, #24
-    beq     strcpy_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       strcpy_unalign3
-
-strcpy_unalign3_copy1byte:
-    strb    r2, [r0]
-    m_pop
-
-strcpy_unalign3_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0]
-    m_pop
-
-strcpy_unalign3_copy3bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0], #1
-    strb    r4, [r0]
-    m_pop
-
-    .p2align 2
-    // Can read 2 bytes before possibly crossing a page.
-strcpy_unalign2:
-    ldrb    r2, [r1]
-    cbz     r2, strcpy_unalign_copy1byte
-    ldrb    r4, [r1, #1]
-    cbz     r4, strcpy_unalign_copy2bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r2, #0xff0000
-    beq     strcpy_copy3bytes
-    lsrs    ip, r2, #24
-    beq     strcpy_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       strcpy_unalign2
-
-    .p2align 2
-    // Can read 1 byte before possibly crossing a page.
-strcpy_unalign1:
-    ldrb    r2, [r1]
-    cbz     r2, strcpy_unalign_copy1byte
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       strcpy_unalign1
-
-strcpy_unalign_copy1byte:
-    strb    r2, [r0]
-    m_pop
-
-strcpy_unalign_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r4, [r0]
-    m_pop
-END(strcpy)
+#define STRCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a15/bionic/string_copy.S b/libc/arch-arm/cortex-a15/bionic/string_copy.S
new file mode 100644
index 0000000..20f0e91
--- /dev/null
+++ b/libc/arch-arm/cortex-a15/bionic/string_copy.S
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(STPCPY) && !defined(STRCPY)
+#error "Either STPCPY or STRCPY must be defined."
+#endif
+
+#include <private/bionic_asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+#if defined(STPCPY)
+    .macro m_push
+    push    {r4, r5, lr}
+    .cfi_def_cfa_offset 12
+    .cfi_rel_offset r4, 0
+    .cfi_rel_offset r5, 4
+    .cfi_rel_offset lr, 8
+    .endm // m_push
+#else
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .cfi_def_cfa_offset 16
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset r4, 4
+    .cfi_rel_offset r5, 8
+    .cfi_rel_offset lr, 12
+    .endm // m_push
+#endif
+
+#if defined(STPCPY)
+    .macro m_pop
+    pop     {r4, r5, pc}
+    .endm // m_pop
+#else
+    .macro m_pop
+    pop     {r0, r4, r5, pc}
+    .endm // m_pop
+#endif
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+#if defined(STPCPY)
+ENTRY(stpcpy)
+#else
+ENTRY(strcpy)
+#endif
+    // For short copies, hard-code checking the first 8 bytes since this
+    // new code doesn't win until after about 8 bytes.
+    m_push
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=.Lstringcopy_continue
+
+.Lstringcopy_finish:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_pop
+
+.Lstringcopy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    beq     .Lstringcopy_check_src_align
+
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     .Lstringcopy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+
+.Lstringcopy_align_to_32:
+    bcc     .Lstringcopy_align_to_64
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+
+.Lstringcopy_align_to_64:
+    tst     r3, #4
+    beq     .Lstringcopy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+    str     r2, [r0], #4
+
+.Lstringcopy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     .Lstringcopy_unaligned_copy
+
+    .p2align 2
+.Lstringcopy_mainloop:
+    ldrd    r2, r3, [r1], #8
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_mainloop
+
+.Lstringcopy_complete:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_pop
+
+.Lstringcopy_zero_in_first_register:
+    lsls    lr, ip, #17
+    bne     .Lstringcopy_copy1byte
+    bcs     .Lstringcopy_copy2bytes
+    lsls    ip, ip, #1
+    bne     .Lstringcopy_copy3bytes
+
+.Lstringcopy_copy4bytes:
+    // Copy 4 bytes to the destination.
+#if defined(STPCPY)
+    str     r2, [r0], #3
+#else
+    str     r2, [r0]
+#endif
+    m_pop
+
+.Lstringcopy_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_copy2bytes:
+#if defined(STPCPY)
+    strh    r2, [r0], #1
+#else
+    strh    r2, [r0]
+#endif
+    m_pop
+
+.Lstringcopy_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_zero_in_second_register:
+    lsls    lr, ip, #17
+    bne     .Lstringcopy_copy5bytes
+    bcs     .Lstringcopy_copy6bytes
+    lsls    ip, ip, #1
+    bne     .Lstringcopy_copy7bytes
+
+    // Copy 8 bytes to the destination.
+    strd    r2, r3, [r0]
+#if defined(STPCPY)
+    add     r0, r0, #7
+#endif
+    m_pop
+
+.Lstringcopy_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+    m_pop
+
+.Lstringcopy_copy6bytes:
+    str     r2, [r0], #4
+#if defined(STPCPY)
+    strh    r3, [r0], #1
+#else
+    strh    r3, [r0]
+#endif
+    m_pop
+
+.Lstringcopy_copy7bytes:
+    str     r2, [r0], #4
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_pop
+
+.Lstringcopy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+.Lstringcopy_unaligned_branchtable:
+    .byte 0
+    .byte ((.Lstringcopy_unalign7 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign6 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign5 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign4 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign3 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign2 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign1 - .Lstringcopy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+.Lstringcopy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, .Lstringcopy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, .Lstringcopy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, .Lstringcopy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign7
+
+.Lstringcopy_unalign7_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+.Lstringcopy_unalign_return:
+    m_pop
+
+.Lstringcopy_unalign7_copy6bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+.Lstringcopy_unalign7_copy7bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+.Lstringcopy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, .Lstringcopy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     .Lstringcopy_copy7bytes
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign6
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+.Lstringcopy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign5
+
+.Lstringcopy_unalign_copy5bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0]
+    m_pop
+
+.Lstringcopy_unalign_copy6bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+.Lstringcopy_unalign4:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+.Lstringcopy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, .Lstringcopy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, .Lstringcopy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     .Lstringcopy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign3
+
+.Lstringcopy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_pop
+
+.Lstringcopy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+.Lstringcopy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+    ldrb    r4, [r1, #1]
+    cbz     r4, .Lstringcopy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     .Lstringcopy_copy3bytes
+    lsrs    ip, r2, #24
+    beq     .Lstringcopy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+.Lstringcopy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign1
+
+.Lstringcopy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r4, [r0]
+    m_pop
+#if defined(STPCPY)
+END(stpcpy)
+#else
+END(strcpy)
+#endif
diff --git a/libc/arch-arm/cortex-a15/cortex-a15.mk b/libc/arch-arm/cortex-a15/cortex-a15.mk
index 552811e..f1abe32 100644
--- a/libc/arch-arm/cortex-a15/cortex-a15.mk
+++ b/libc/arch-arm/cortex-a15/cortex-a15.mk
@@ -1,10 +1,11 @@
 libc_bionic_src_files_arm += \
     arch-arm/cortex-a15/bionic/memcpy.S \
     arch-arm/cortex-a15/bionic/memset.S \
+    arch-arm/cortex-a15/bionic/stpcpy.S \
     arch-arm/cortex-a15/bionic/strcat.S \
+    arch-arm/cortex-a15/bionic/__strcat_chk.S \
     arch-arm/cortex-a15/bionic/strcmp.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
-    arch-arm/cortex-a15/bionic/strlen.S \
-    arch-arm/cortex-a15/bionic/__strcat_chk.S \
     arch-arm/cortex-a15/bionic/__strcpy_chk.S \
+    arch-arm/cortex-a15/bionic/strlen.S \
     bionic/memmove.c \
diff --git a/libc/arch-arm/cortex-a9/bionic/__strcat_chk.S b/libc/arch-arm/cortex-a9/bionic/__strcat_chk.S
index 651aefc..45517f1 100644
--- a/libc/arch-arm/cortex-a9/bionic/__strcat_chk.S
+++ b/libc/arch-arm/cortex-a9/bionic/__strcat_chk.S
@@ -40,12 +40,10 @@
 ENTRY(__strcat_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
     push    {r4, r5}
-    .save   {r4, r5}
     .cfi_adjust_cfa_offset 8
     .cfi_rel_offset r4, 0
     .cfi_rel_offset r5, 4
@@ -199,8 +197,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcat_chk_fail)
-    .save   {r0, lr}
-    .save   {r4, r5}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/cortex-a9/bionic/__strcpy_chk.S b/libc/arch-arm/cortex-a9/bionic/__strcpy_chk.S
index 2447780..67eca08 100644
--- a/libc/arch-arm/cortex-a9/bionic/__strcpy_chk.S
+++ b/libc/arch-arm/cortex-a9/bionic/__strcpy_chk.S
@@ -39,7 +39,6 @@
 ENTRY(__strcpy_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
@@ -165,7 +164,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcpy_chk_fail)
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/cortex-a9/bionic/memcpy.S b/libc/arch-arm/cortex-a9/bionic/memcpy.S
index 8dcd937..db3e26f 100644
--- a/libc/arch-arm/cortex-a9/bionic/memcpy.S
+++ b/libc/arch-arm/cortex-a9/bionic/memcpy.S
@@ -50,7 +50,6 @@
 ENTRY(memcpy)
         pld     [r1, #0]
         stmfd   sp!, {r0, lr}
-        .save   {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -64,7 +63,6 @@
 ENTRY_PRIVATE(__memcpy_chk_fail)
         // Preserve lr for backtrace.
         push    {lr}
-        .save   {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
 
diff --git a/libc/arch-arm/cortex-a9/bionic/memcpy_base.S b/libc/arch-arm/cortex-a9/bionic/memcpy_base.S
index c385657..5e81305 100644
--- a/libc/arch-arm/cortex-a9/bionic/memcpy_base.S
+++ b/libc/arch-arm/cortex-a9/bionic/memcpy_base.S
@@ -33,7 +33,6 @@
  */
 
 ENTRY_PRIVATE(MEMCPY_BASE)
-        .save       {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -139,14 +138,12 @@
 END(MEMCPY_BASE)
 
 ENTRY_PRIVATE(MEMCPY_BASE_ALIGNED)
-        .save       {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
 
         /* Simple arm-only copy loop to handle aligned copy operations */
         stmfd       sp!, {r4-r8}
-        .save       {r4-r8}
         .cfi_adjust_cfa_offset 20
         .cfi_rel_offset r4, 0
         .cfi_rel_offset r5, 4
diff --git a/libc/arch-arm/cortex-a9/bionic/memset.S b/libc/arch-arm/cortex-a9/bionic/memset.S
index a5057eb..299f5a2 100644
--- a/libc/arch-arm/cortex-a9/bionic/memset.S
+++ b/libc/arch-arm/cortex-a9/bionic/memset.S
@@ -42,7 +42,6 @@
 
         // Preserve lr for backtrace.
         push        {lr}
-        .save       {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
 
@@ -72,7 +71,6 @@
         bhi         __memset_large_copy
 
         stmfd       sp!, {r0}
-        .save       {r0}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset r0, 0
 
@@ -114,7 +112,6 @@
          * offset = (4-(src&3))&3 = -src & 3
          */
         stmfd       sp!, {r0, r4-r7, lr}
-        .save       {r0, r4-r7, lr}
         .cfi_def_cfa_offset 24
         .cfi_rel_offset r0, 0
         .cfi_rel_offset r4, 4
diff --git a/libc/arch-arm/cortex-a9/bionic/stpcpy.S b/libc/arch-arm/cortex-a9/bionic/stpcpy.S
new file mode 100644
index 0000000..740523b
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/stpcpy.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define STPCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a9/bionic/strcmp.S b/libc/arch-arm/cortex-a9/bionic/strcmp.S
index 2411c65..4ff26c0 100644
--- a/libc/arch-arm/cortex-a9/bionic/strcmp.S
+++ b/libc/arch-arm/cortex-a9/bionic/strcmp.S
@@ -168,7 +168,6 @@
         bne     .L_do_align
 
         /* Fast path.  */
-        .save   {r4-r7}
         init
 
 .L_doubleword_aligned:
diff --git a/libc/arch-arm/cortex-a9/bionic/strcpy.S b/libc/arch-arm/cortex-a9/bionic/strcpy.S
index 9e9610b..951face 100644
--- a/libc/arch-arm/cortex-a9/bionic/strcpy.S
+++ b/libc/arch-arm/cortex-a9/bionic/strcpy.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,432 +25,6 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-/*
- * Copyright (c) 2013 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
- *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
 
-#include <private/bionic_asm.h>
-
-    .syntax unified
-
-    .thumb
-    .thumb_func
-
-    .macro m_push
-    push    {r0, r4, r5, lr}
-    .endm // m_push
-
-    .macro m_ret inst
-    \inst   {r0, r4, r5, pc}
-    .endm // m_ret
-
-    .macro m_copy_byte reg, cmd, label
-    ldrb    \reg, [r1], #1
-    strb    \reg, [r0], #1
-    \cmd    \reg, \label
-    .endm // m_copy_byte
-
-ENTRY(strcpy)
-    // Unroll the first 8 bytes that will be copied.
-    m_push
-    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r5, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r2, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=strcpy_finish
-    m_copy_byte reg=r5, cmd=cbnz, label=strcpy_continue
-
-strcpy_finish:
-    m_ret   inst=pop
-
-strcpy_continue:
-    pld     [r1, #0]
-    ands    r3, r0, #7
-    bne     strcpy_align_dst
-
-strcpy_check_src_align:
-    // At this point dst is aligned to a double word, check if src
-    // is also aligned to a double word.
-    ands    r3, r1, #7
-    bne     strcpy_unaligned_copy
-
-    .p2align 2
-strcpy_mainloop:
-    ldmia   r1!, {r2, r3}
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       strcpy_mainloop
-
-strcpy_zero_in_first_register:
-    lsls    lr, ip, #17
-    itt     ne
-    strbne  r2, [r0]
-    m_ret   inst=popne
-    itt     cs
-    strhcs  r2, [r0]
-    m_ret   inst=popcs
-    lsls    ip, ip, #1
-    itt     eq
-    streq   r2, [r0]
-    m_ret   inst=popeq
-    strh    r2, [r0], #2
-    lsr     r3, r2, #16
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-strcpy_zero_in_second_register:
-    lsls    lr, ip, #17
-    ittt    ne
-    stmiane r0!, {r2}
-    strbne  r3, [r0]
-    m_ret   inst=popne
-    ittt    cs
-    strcs   r2, [r0], #4
-    strhcs  r3, [r0]
-    m_ret   inst=popcs
-    lsls    ip, ip, #1
-    itt     eq
-    stmiaeq r0, {r2, r3}
-    m_ret   inst=popeq
-    stmia   r0!, {r2}
-    strh    r3, [r0], #2
-    lsr     r4, r3, #16
-    strb    r4, [r0]
-    m_ret   inst=pop
-
-strcpy_align_dst:
-    // Align to a double word (64 bits).
-    rsb     r3, r3, #8
-    lsls    ip, r3, #31
-    beq     strcpy_align_to_32
-
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, strcpy_complete
-
-strcpy_align_to_32:
-    bcc     strcpy_align_to_64
-
-    ldrb    r4, [r1], #1
-    strb    r4, [r0], #1
-    cmp     r4, #0
-    it      eq
-    m_ret   inst=popeq
-    ldrb    r5, [r1], #1
-    strb    r5, [r0], #1
-    cmp     r5, #0
-    it      eq
-    m_ret   inst=popeq
-
-strcpy_align_to_64:
-    tst     r3, #4
-    beq     strcpy_check_src_align
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-    stmia   r0!, {r2}
-    b       strcpy_check_src_align
-
-strcpy_complete:
-    m_ret   inst=pop
-
-strcpy_unaligned_copy:
-    // Dst is aligned to a double word, while src is at an unknown alignment.
-    // There are 7 different versions of the unaligned copy code
-    // to prevent overreading the src. The mainloop of every single version
-    // will store 64 bits per loop. The difference is how much of src can
-    // be read without potentially crossing a page boundary.
-    tbb     [pc, r3]
-strcpy_unaligned_branchtable:
-    .byte 0
-    .byte ((strcpy_unalign7 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign6 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign5 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign4 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign3 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign2 - strcpy_unaligned_branchtable)/2)
-    .byte ((strcpy_unalign1 - strcpy_unaligned_branchtable)/2)
-
-    .p2align 2
-    // Can read 7 bytes before possibly crossing a page.
-strcpy_unalign7:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldrb    r3, [r1]
-    cbz     r3, strcpy_unalign7_copy5bytes
-    ldrb    r4, [r1, #1]
-    cbz     r4, strcpy_unalign7_copy6bytes
-    ldrb    r5, [r1, #2]
-    cbz     r5, strcpy_unalign7_copy7bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    lsrs    ip, r3, #24
-    stmia   r0!, {r2, r3}
-    beq     strcpy_unalign_return
-    b       strcpy_unalign7
-
-strcpy_unalign7_copy5bytes:
-    stmia   r0!, {r2}
-    strb    r3, [r0]
-strcpy_unalign_return:
-    m_ret   inst=pop
-
-strcpy_unalign7_copy6bytes:
-    stmia   r0!, {r2}
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    m_ret   inst=pop
-
-strcpy_unalign7_copy7bytes:
-    stmia   r0!, {r2}
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    strb    r5, [r0], #1
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 6 bytes before possibly crossing a page.
-strcpy_unalign6:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, strcpy_unalign_copy5bytes
-    ldrb    r5, [r1, #1]
-    cbz     r5, strcpy_unalign_copy6bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r3, #0xff0000
-    beq     strcpy_unalign6_copy7bytes
-    lsrs    ip, r3, #24
-    stmia   r0!, {r2, r3}
-    beq     strcpy_unalign_return
-    b       strcpy_unalign6
-
-strcpy_unalign6_copy7bytes:
-    stmia   r0!, {r2}
-    strh    r3, [r0], #2
-    lsr     r3, #16
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 5 bytes before possibly crossing a page.
-strcpy_unalign5:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, strcpy_unalign_copy5bytes
-
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       strcpy_unalign5
-
-strcpy_unalign_copy5bytes:
-    stmia   r0!, {r2}
-    strb    r4, [r0]
-    m_ret   inst=pop
-
-strcpy_unalign_copy6bytes:
-    stmia   r0!, {r2}
-    strb    r4, [r0], #1
-    strb    r5, [r0]
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 4 bytes before possibly crossing a page.
-strcpy_unalign4:
-    ldmia   r1!, {r2}
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    ldmia   r1!, {r3}
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       strcpy_unalign4
-
-    .p2align 2
-    // Can read 3 bytes before possibly crossing a page.
-strcpy_unalign3:
-    ldrb    r2, [r1]
-    cbz     r2, strcpy_unalign3_copy1byte
-    ldrb    r3, [r1, #1]
-    cbz     r3, strcpy_unalign3_copy2bytes
-    ldrb    r4, [r1, #2]
-    cbz     r4, strcpy_unalign3_copy3bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    lsrs    lr, r2, #24
-    beq     strcpy_unalign_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       strcpy_unalign3
-
-strcpy_unalign3_copy1byte:
-    strb    r2, [r0]
-    m_ret   inst=pop
-
-strcpy_unalign3_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-strcpy_unalign3_copy3bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0], #1
-    strb    r4, [r0]
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 2 bytes before possibly crossing a page.
-strcpy_unalign2:
-    ldrb    r2, [r1]
-    cbz     r2, strcpy_unalign_copy1byte
-    ldrb    r3, [r1, #1]
-    cbz     r3, strcpy_unalign_copy2bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r2, #0xff0000
-    beq     strcpy_unalign_copy3bytes
-    lsrs    ip, r2, #24
-    beq     strcpy_unalign_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       strcpy_unalign2
-
-    .p2align 2
-    // Can read 1 byte before possibly crossing a page.
-strcpy_unalign1:
-    ldrb    r2, [r1]
-    cbz     r2, strcpy_unalign_copy1byte
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     strcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       strcpy_unalign1
-
-strcpy_unalign_copy1byte:
-    strb    r2, [r0]
-    m_ret   inst=pop
-
-strcpy_unalign_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-strcpy_unalign_copy3bytes:
-    strh    r2, [r0], #2
-    lsr     r2, #16
-    strb    r2, [r0]
-    m_ret   inst=pop
-
-strcpy_unalign_copy4bytes:
-    stmia   r0, {r2}
-    m_ret   inst=pop
-END(strcpy)
+#define STRCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a9/bionic/string_copy.S b/libc/arch-arm/cortex-a9/bionic/string_copy.S
new file mode 100644
index 0000000..caf5a11
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/string_copy.S
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(STPCPY) && !defined(STRCPY)
+#error "Either STPCPY or STRCPY must be defined."
+#endif
+
+#include <private/bionic_asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+#if defined(STPCPY)
+    .macro m_push
+    push    {r4, r5, lr}
+    .cfi_def_cfa_offset 12
+    .cfi_rel_offset r4, 0
+    .cfi_rel_offset r5, 4
+    .cfi_rel_offset lr, 8
+    .endm // m_push
+#else
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .cfi_def_cfa_offset 16
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset r4, 4
+    .cfi_rel_offset r5, 8
+    .cfi_rel_offset lr, 12
+    .endm // m_push
+#endif
+
+#if defined(STPCPY)
+    .macro m_ret inst
+    \inst   {r4, r5, pc}
+    .endm // m_ret
+#else
+    .macro m_ret inst
+    \inst   {r0, r4, r5, pc}
+    .endm // m_ret
+#endif
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+#if defined(STPCPY)
+ENTRY(stpcpy)
+#else
+ENTRY(strcpy)
+#endif
+    // Unroll the first 8 bytes that will be copied.
+    m_push
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=.Lstringcopy_continue
+
+.Lstringcopy_finish:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_ret   inst=pop
+
+.Lstringcopy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    bne     .Lstringcopy_align_dst
+
+.Lstringcopy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     .Lstringcopy_unaligned_copy
+
+    .p2align 2
+.Lstringcopy_mainloop:
+    ldmia   r1!, {r2, r3}
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_mainloop
+
+.Lstringcopy_zero_in_first_register:
+    lsls    lr, ip, #17
+    itt     ne
+    strbne  r2, [r0]
+    m_ret   inst=popne
+    itt     cs
+#if defined(STPCPY)
+    strhcs  r2, [r0], #1
+#else
+    strhcs  r2, [r0]
+#endif
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+    itt     eq
+#if defined(STPCPY)
+    streq   r2, [r0], #3
+#else
+    streq   r2, [r0]
+#endif
+    m_ret   inst=popeq
+    strh    r2, [r0], #2
+    lsr     r3, r2, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
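
(The flag gymnastics in .Lstringcopy_zero_in_first_register, lsls #17 followed by lsls #1, only decode which byte of the loaded word held the NUL so that just the valid prefix is stored. On a little-endian core the same answer can be read straight off the mask, as in this sketch; mask is the nonzero value produced by the test above:)

    #include <stdint.h>

    /* Index (0..3) of the first zero byte, given a nonzero 0x80808080-style mask.
       Little-endian: the lowest set bit belongs to the lowest-addressed zero byte. */
    static inline int first_zero_byte_index(uint32_t mask) {
        return __builtin_ctz(mask) >> 3;
    }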
+
+.Lstringcopy_zero_in_second_register:
+    lsls    lr, ip, #17
+    ittt    ne
+    stmiane r0!, {r2}
+    strbne  r3, [r0]
+    m_ret   inst=popne
+    ittt    cs
+    strcs   r2, [r0], #4
+#if defined(STPCPY)
+    strhcs  r3, [r0], #1
+#else
+    strhcs  r3, [r0]
+#endif
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+#if defined(STPCPY)
+    ittt    eq
+#else
+    itt     eq
+#endif
+    stmiaeq r0, {r2, r3}
+#if defined(STPCPY)
+    addeq   r0, r0, #7
+#endif
+    m_ret   inst=popeq
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r4, r3, #16
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_align_dst:
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     .Lstringcopy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+
+.Lstringcopy_align_to_32:
+    bcc     .Lstringcopy_align_to_64
+
+    ldrb    r4, [r1], #1
+    strb    r4, [r0], #1
+    cmp     r4, #0
+#if defined(STPCPY)
+    itt     eq
+    subeq   r0, r0, #1
+#else
+    it      eq
+#endif
+    m_ret   inst=popeq
+    ldrb    r5, [r1], #1
+    strb    r5, [r0], #1
+    cmp     r5, #0
+#if defined(STPCPY)
+    itt     eq
+    subeq   r0, r0, #1
+#else
+    it      eq
+#endif
+    m_ret   inst=popeq
+
+.Lstringcopy_align_to_64:
+    tst     r3, #4
+    beq     .Lstringcopy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+    stmia   r0!, {r2}
+    b       .Lstringcopy_check_src_align
+
+.Lstringcopy_complete:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_ret   inst=pop
+
+.Lstringcopy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 versions of the unaligned copy code, chosen so that src
+    // is never over-read. The main loop of each version stores 64 bits per
+    // iteration; they differ only in how many bytes of src can be read
+    // without potentially crossing a page boundary.
+    tbb     [pc, r3]
+.Lstringcopy_unaligned_branchtable:
+    .byte 0
+    .byte ((.Lstringcopy_unalign7 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign6 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign5 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign4 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign3 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign2 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign1 - .Lstringcopy_unaligned_branchtable)/2)
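
(The tbb above dispatches on src & 7. Pages are 4096-byte aligned and therefore also 8-byte aligned, so loads may safely run up to the next 8-byte boundary of src without touching a new page; the .Lstringcopy_unalignN labels are named for how many bytes that allows. The selection arithmetic, as a C sketch:)

    #include <stdint.h>

    /* Bytes readable from src before the next 8-byte (and therefore page) boundary.
       Returns 1..7 here; the fully aligned case (src & 7 == 0) never reaches the table. */
    static inline unsigned safe_read_bytes(uintptr_t src) {
        return 8u - (unsigned)(src & 7u);
    }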
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+.Lstringcopy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, .Lstringcopy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, .Lstringcopy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, .Lstringcopy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign7
+
+.Lstringcopy_unalign7_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0]
+.Lstringcopy_unalign_return:
+    m_ret   inst=pop
+
+.Lstringcopy_unalign7_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign7_copy7bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+.Lstringcopy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, .Lstringcopy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     .Lstringcopy_unalign6_copy7bytes
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign6
+
+.Lstringcopy_unalign6_copy7bytes:
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+.Lstringcopy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign5
+
+.Lstringcopy_unalign_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+.Lstringcopy_unalign4:
+    ldmia   r1!, {r2}
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldmia   r1!, {r3}
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+.Lstringcopy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, .Lstringcopy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, .Lstringcopy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     .Lstringcopy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign3
+
+.Lstringcopy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+.Lstringcopy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, .Lstringcopy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     .Lstringcopy_unalign_copy3bytes
+    lsrs    ip, r2, #24
+    beq     .Lstringcopy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+.Lstringcopy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign1
+
+.Lstringcopy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy4bytes:
+    stmia   r0, {r2}
+#if defined(STPCPY)
+    add     r0, r0, #3
+#endif
+    m_ret   inst=pop
+#if defined(STPCPY)
+END(stpcpy)
+#else
+END(strcpy)
+#endif
diff --git a/libc/arch-arm/cortex-a9/cortex-a9.mk b/libc/arch-arm/cortex-a9/cortex-a9.mk
index 9b99387..c82db3b 100644
--- a/libc/arch-arm/cortex-a9/cortex-a9.mk
+++ b/libc/arch-arm/cortex-a9/cortex-a9.mk
@@ -1,10 +1,11 @@
 libc_bionic_src_files_arm += \
     arch-arm/cortex-a9/bionic/memcpy.S \
     arch-arm/cortex-a9/bionic/memset.S \
+    arch-arm/cortex-a9/bionic/stpcpy.S \
     arch-arm/cortex-a9/bionic/strcat.S \
+    arch-arm/cortex-a9/bionic/__strcat_chk.S \
     arch-arm/cortex-a9/bionic/strcmp.S \
     arch-arm/cortex-a9/bionic/strcpy.S \
-    arch-arm/cortex-a9/bionic/strlen.S \
-    arch-arm/cortex-a9/bionic/__strcat_chk.S \
     arch-arm/cortex-a9/bionic/__strcpy_chk.S \
+    arch-arm/cortex-a9/bionic/strlen.S \
     bionic/memmove.c \
diff --git a/libc/arch-arm/denver/bionic/__strcat_chk.S b/libc/arch-arm/denver/bionic/__strcat_chk.S
index 36da2d9..a2e9c22 100644
--- a/libc/arch-arm/denver/bionic/__strcat_chk.S
+++ b/libc/arch-arm/denver/bionic/__strcat_chk.S
@@ -40,12 +40,10 @@
 ENTRY(__strcat_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
     push    {r4, r5}
-    .save   {r4, r5}
     .cfi_adjust_cfa_offset 8
     .cfi_rel_offset r4, 0
     .cfi_rel_offset r5, 4
@@ -195,9 +193,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcat_chk_failed)
-    .save   {r0, lr}
-    .save   {r4, r5}
-
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/denver/bionic/__strcpy_chk.S b/libc/arch-arm/denver/bionic/__strcpy_chk.S
index c3e3e14..db76686 100644
--- a/libc/arch-arm/denver/bionic/__strcpy_chk.S
+++ b/libc/arch-arm/denver/bionic/__strcpy_chk.S
@@ -39,7 +39,6 @@
 ENTRY(__strcpy_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
@@ -161,7 +160,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcpy_chk_failed)
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/denver/bionic/memcpy.S b/libc/arch-arm/denver/bionic/memcpy.S
index da4f3dd..410b663 100644
--- a/libc/arch-arm/denver/bionic/memcpy.S
+++ b/libc/arch-arm/denver/bionic/memcpy.S
@@ -72,7 +72,6 @@
 ENTRY(memcpy)
         pld     [r1, #64]
         push    {r0, lr}
-        .save   {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -85,7 +84,6 @@
 ENTRY_PRIVATE(__memcpy_chk_fail)
         // Preserve lr for backtrace.
         push    {lr}
-        .save   {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
 
diff --git a/libc/arch-arm/denver/denver.mk b/libc/arch-arm/denver/denver.mk
index 6989187..0bc52a2 100644
--- a/libc/arch-arm/denver/denver.mk
+++ b/libc/arch-arm/denver/denver.mk
@@ -7,7 +7,8 @@
 
 # Use cortex-a15 versions of strcat/strcpy/strlen.
 libc_bionic_src_files_arm += \
+    arch-arm/cortex-a15/bionic/stpcpy.S \
     arch-arm/cortex-a15/bionic/strcat.S \
+    arch-arm/cortex-a15/bionic/strcmp.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
     arch-arm/cortex-a15/bionic/strlen.S \
-    arch-arm/cortex-a15/bionic/strcmp.S \
diff --git a/libc/arch-arm/generic/generic.mk b/libc/arch-arm/generic/generic.mk
index 2456e6e..95be867 100644
--- a/libc/arch-arm/generic/generic.mk
+++ b/libc/arch-arm/generic/generic.mk
@@ -7,4 +7,5 @@
     bionic/memmove.c \
     bionic/__strcat_chk.cpp \
     bionic/__strcpy_chk.cpp \
+    upstream-openbsd/lib/libc/string/stpcpy.c \
     upstream-openbsd/lib/libc/string/strcat.c \
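
(stpcpy is now wired up for every ARM variant: the CPU-specific targets get an assembly version, while the generic target falls back to the upstream OpenBSD C file added here. The point of stpcpy over strcpy is that the returned pointer lets callers chain copies without rescanning; a usage sketch, with a made-up path purely for illustration:)

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        char path[64];
        /* Each call resumes where the previous one ended; no O(n^2) strcat rescans. */
        char* p = stpcpy(path, "/data/local");
        p = stpcpy(p, "/tmp");
        p = stpcpy(p, "/example.txt");
        printf("%s (%zu bytes)\n", path, (size_t)(p - path));
        return 0;
    }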
diff --git a/libc/arch-arm/krait/bionic/__strcat_chk.S b/libc/arch-arm/krait/bionic/__strcat_chk.S
index 34becdb..246f159 100644
--- a/libc/arch-arm/krait/bionic/__strcat_chk.S
+++ b/libc/arch-arm/krait/bionic/__strcat_chk.S
@@ -40,12 +40,10 @@
 ENTRY(__strcat_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
     push    {r4, r5}
-    .save   {r4, r5}
     .cfi_adjust_cfa_offset 8
     .cfi_rel_offset r4, 0
     .cfi_rel_offset r5, 4
@@ -194,8 +192,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcat_chk_failed)
-    .save   {r0, lr}
-    .save   {r4, r5}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/krait/bionic/__strcpy_chk.S b/libc/arch-arm/krait/bionic/__strcpy_chk.S
index c3e3e14..db76686 100644
--- a/libc/arch-arm/krait/bionic/__strcpy_chk.S
+++ b/libc/arch-arm/krait/bionic/__strcpy_chk.S
@@ -39,7 +39,6 @@
 ENTRY(__strcpy_chk)
     pld     [r0, #0]
     push    {r0, lr}
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
@@ -161,7 +160,6 @@
 #include "memcpy_base.S"
 
 ENTRY_PRIVATE(__strcpy_chk_failed)
-    .save   {r0, lr}
     .cfi_def_cfa_offset 8
     .cfi_rel_offset r0, 0
     .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/krait/bionic/memcpy.S b/libc/arch-arm/krait/bionic/memcpy.S
index 0b7b276..9ff46a8 100644
--- a/libc/arch-arm/krait/bionic/memcpy.S
+++ b/libc/arch-arm/krait/bionic/memcpy.S
@@ -53,7 +53,6 @@
 ENTRY(memcpy)
         pld     [r1, #64]
         stmfd   sp!, {r0, lr}
-        .save   {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
@@ -66,7 +65,6 @@
 ENTRY_PRIVATE(__memcpy_chk_fail)
         // Preserve lr for backtrace.
         push    {lr}
-        .save   {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
 
diff --git a/libc/arch-arm/krait/bionic/memcpy_base.S b/libc/arch-arm/krait/bionic/memcpy_base.S
index 99fc255..035dcf1 100644
--- a/libc/arch-arm/krait/bionic/memcpy_base.S
+++ b/libc/arch-arm/krait/bionic/memcpy_base.S
@@ -36,7 +36,6 @@
 // Assumes neon instructions and a cache line size of 32 bytes.
 
 ENTRY_PRIVATE(MEMCPY_BASE)
-        .save {r0, lr}
         .cfi_def_cfa_offset 8
         .cfi_rel_offset r0, 0
         .cfi_rel_offset lr, 4
diff --git a/libc/arch-arm/krait/bionic/memset.S b/libc/arch-arm/krait/bionic/memset.S
index 5d1943b..e9f6431 100644
--- a/libc/arch-arm/krait/bionic/memset.S
+++ b/libc/arch-arm/krait/bionic/memset.S
@@ -43,7 +43,6 @@
         bls         .L_done
 
         // Preserve lr for backtrace.
-        .save       {lr}
         push        {lr}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset lr, 0
@@ -69,7 +68,6 @@
 
 /* memset() returns its first argument.  */
 ENTRY(memset)
-        .save       {r0}
         stmfd       sp!, {r0}
         .cfi_def_cfa_offset 4
         .cfi_rel_offset r0, 0
diff --git a/libc/arch-arm/krait/bionic/strcmp.S b/libc/arch-arm/krait/bionic/strcmp.S
index eacb82a..9121c01 100644
--- a/libc/arch-arm/krait/bionic/strcmp.S
+++ b/libc/arch-arm/krait/bionic/strcmp.S
@@ -168,7 +168,6 @@
         bne     .L_do_align
 
         /* Fast path.  */
-        .save   {r4-r7}
         init
 
 .L_doubleword_aligned:
diff --git a/libc/arch-arm/krait/krait.mk b/libc/arch-arm/krait/krait.mk
index 631ab68..1bb7b0a 100644
--- a/libc/arch-arm/krait/krait.mk
+++ b/libc/arch-arm/krait/krait.mk
@@ -7,6 +7,7 @@
 
 # Use cortex-a15 versions of strcat/strcpy/strlen and standard memmove
 libc_bionic_src_files_arm += \
+    arch-arm/cortex-a15/bionic/stpcpy.S \
     arch-arm/cortex-a15/bionic/strcat.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
     arch-arm/cortex-a15/bionic/strlen.S \
diff --git a/libc/include/arpa/inet.h b/libc/include/arpa/inet.h
index 067be1f..86265bf 100644
--- a/libc/include/arpa/inet.h
+++ b/libc/include/arpa/inet.h
@@ -25,6 +25,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
+
 #ifndef _ARPA_INET_H_
 #define _ARPA_INET_H_
 
@@ -34,8 +35,6 @@
 
 __BEGIN_DECLS
 
-typedef uint32_t in_addr_t;
-
 in_addr_t inet_addr(const char*);
 int inet_aton(const char*, struct in_addr*);
 in_addr_t inet_lnaof(struct in_addr);
diff --git a/libc/include/netinet/in.h b/libc/include/netinet/in.h
index bf3b498..44c7fc1 100644
--- a/libc/include/netinet/in.h
+++ b/libc/include/netinet/in.h
@@ -25,6 +25,7 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
+
 #ifndef _NETINET_IN_H_
 #define _NETINET_IN_H_
 
@@ -43,6 +44,9 @@
 
 #define INET_ADDRSTRLEN 16
 
+typedef uint16_t in_port_t;
+typedef uint32_t in_addr_t;
+
 extern int bindresvport (int sd, struct sockaddr_in *sin);
 
 static const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
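
(Moving in_addr_t out of <arpa/inet.h> and declaring both in_port_t and in_addr_t in <netinet/in.h> matches where POSIX puts them. A small sketch of code that now only needs the socket headers for both types; the helper name is illustrative, not part of bionic:)

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Both arguments are assumed to already be in network byte order. */
    static void fill_sockaddr_in(struct sockaddr_in* sa, in_addr_t addr, in_port_t port) {
        memset(sa, 0, sizeof(*sa));
        sa->sin_family = AF_INET;
        sa->sin_addr.s_addr = addr;
        sa->sin_port = port;
    }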
diff --git a/libc/include/signal.h b/libc/include/signal.h
index f1849c5..e23e65b 100644
--- a/libc/include/signal.h
+++ b/libc/include/signal.h
@@ -48,6 +48,9 @@
 #  include <linux/signal.h>
 #endif
 
+#include <sys/ucontext.h>
+#define __BIONIC_HAVE_UCONTEXT_T
+
 __BEGIN_DECLS
 
 typedef int sig_atomic_t;
diff --git a/libc/include/sys/ucontext.h b/libc/include/sys/ucontext.h
index f62380d..dd2a0bb 100644
--- a/libc/include/sys/ucontext.h
+++ b/libc/include/sys/ucontext.h
@@ -68,11 +68,9 @@
   struct ucontext* uc_link;
   stack_t uc_stack;
   mcontext_t uc_mcontext;
+  sigset_t uc_sigmask;
   // Android has a wrong (smaller) sigset_t on ARM.
-  union {
-    sigset_t bionic;
-    uint32_t kernel[2];
-  } uc_sigmask;
+  uint32_t __padding_rt_sigset;
   // The kernel adds extra padding after uc_sigmask to match glibc sigset_t on ARM.
   char __padding[120];
   unsigned long uc_regspace[128] __attribute__((__aligned__(8)));
@@ -80,6 +78,10 @@
 
 #elif defined(__aarch64__)
 
+#define NGREG 34 /* x0..x30 + sp + pc + pstate */
+typedef unsigned long greg_t;
+typedef greg_t gregset_t[NGREG];
+
 #include <asm/sigcontext.h>
 typedef struct sigcontext mcontext_t;
 
@@ -152,11 +154,9 @@
   struct ucontext* uc_link;
   stack_t uc_stack;
   mcontext_t uc_mcontext;
+  sigset_t uc_sigmask;
   // Android has a wrong (smaller) sigset_t on x86.
-  union {
-    sigset_t bionic;
-    uint32_t kernel[2];
-  } uc_sigmask;
+  uint32_t __padding_rt_sigset;
   struct _libc_fpstate __fpregs_mem;
 } ucontext_t;
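
(With <signal.h> now pulling in <sys/ucontext.h>, and uc_sigmask becoming a plain sigset_t instead of the old union, SA_SIGINFO handlers can use the context argument directly. A hedged consumer sketch; the handler and its logic are illustrative only:)

    #include <signal.h>
    #include <string.h>

    /* The third argument of an SA_SIGINFO handler is the interrupted ucontext_t. */
    static void on_segv(int sig, siginfo_t* info, void* uc_void) {
        ucontext_t* uc = (ucontext_t*)uc_void;
        /* uc->uc_sigmask is now an ordinary sigset_t. */
        if (sigismember(&uc->uc_sigmask, SIGUSR1) == 1) {
            /* SIGUSR1 was blocked when the fault was taken. */
        }
        (void)sig;
        (void)info;
    }

    static int install_handler(void) {
        struct sigaction sa;
        memset(&sa, 0, sizeof(sa));
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = SA_SIGINFO;
        sa.sa_sigaction = on_segv;
        return sigaction(SIGSEGV, &sa, NULL);
    }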
 
diff --git a/libc/include/sys/user.h b/libc/include/sys/user.h
index 66b371d..0e36825 100644
--- a/libc/include/sys/user.h
+++ b/libc/include/sys/user.h
@@ -91,7 +91,7 @@
   unsigned long start_stack;
   long int signal;
   int reserved;
-  unsigned long u_ar0;
+  struct user_regs_struct* u_ar0;
   struct user_fpregs_struct* u_fpstate;
   unsigned long magic;
   char u_comm[32];
@@ -155,7 +155,7 @@
   long int signal;
   int reserved;
   int pad1;
-  unsigned long u_ar0;
+  struct user_regs_struct* u_ar0;
   struct user_fpregs_struct* u_fpstate;
   unsigned long magic;
   char u_comm[32];
@@ -175,7 +175,7 @@
   unsigned long start_data;
   unsigned long start_stack;
   long int signal;
-  unsigned long u_ar0;
+  void* u_ar0;
   unsigned long magic;
   char u_comm[32];
 };