/*
 *  arch/xtensa/lib/strncpy_user.S
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file "COPYING" in the main directory of
 *  this archive for more details.
 *
 *  Returns: -EFAULT if exception before terminator, N if the entire
 *  buffer filled, else strlen.
 *
 *  Copyright (C) 2002 Tensilica Inc.
 */

#include <asm/variant/core.h>
#include <linux/errno.h>

/* Load or store instructions that may cause exceptions use the EX macro. */

#define EX(insn,reg1,reg2,offset,handler)	\
9:	insn	reg1, reg2, offset;		\
	.section __ex_table, "a";		\
	.word	9b, handler;			\
	.previous

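/*
 * Illustrative expansion (a sketch, not emitted verbatim):
 *
 *	EX(l8ui, a9, a3, 0, fixup_l)
 *
 * becomes
 *
 * 9:	l8ui	a9, a3, 0
 *	.section __ex_table, "a"
 *	.word	9b, fixup_l
 *	.previous
 *
 * i.e. the load is tagged in __ex_table so that a fault on it is
 * redirected to fixup_l instead of raising an unhandled exception.
 */
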
/*
 * char *__strncpy_user(char *dst, const char *src, size_t len)
 */

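/*
 * Roughly equivalent C, for reference only (a sketch of the intended
 * semantics, not the implementation; the byte accesses stand for the
 * exception-checked EX versions):
 *
 *	size_t i;
 *	for (i = 0; i < len; i++) {
 *		dst[i] = src[i];	// a fault here yields -EFAULT
 *		if (src[i] == '\0')
 *			return i;	// strlen, terminator excluded
 *	}
 *	return len;			// whole buffer filled, no NUL
 */
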
#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif

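/*
 * Worked example (little-endian): loading the bytes 'a' 0x61, 'b' 0x62,
 * '\0', 'd' 0x64 as one word yields a9 = 0x64006261.  a9 & MASK2
 * (0x00ff0000) is zero, so "bnone a9, a7, .Lz2" below spots the
 * terminator in byte 2 without testing bytes one at a time.
 */
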
# Register use
#   a0/ return address
#   a1/ stack pointer
#   a2/ return value
#   a3/ src
#   a4/ len
#   a5/ mask0
#   a6/ mask1
#   a7/ mask2
#   a8/ mask3
#   a9/ tmp
#   a10/ tmp
#   a11/ dst
#   a12/ tmp

	.text
	.align	4
	.global	__strncpy_user
	.type	__strncpy_user,@function
__strncpy_user:
	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
	mov	a11, a2		# leave dst in return value register
	beqz	a4, .Lret	# if len is zero
	movi	a5, MASK0	# mask for byte 0
	movi	a6, MASK1	# mask for byte 1
	movi	a7, MASK2	# mask for byte 2
	movi	a8, MASK3	# mask for byte 3
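	/*
	 * Alignment dispatch: the preambles below copy single bytes
	 * until src is word-aligned, then a12 is set to the number of
	 * whole words still to copy.  An unaligned dst falls back to
	 * the byte-copy loop at .Ldstunaligned.
	 */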
	bbsi.l	a3, 0, .Lsrc1mod2	# if only  8-bit aligned
	bbsi.l	a3, 1, .Lsrc2mod4	# if only 16-bit aligned
.Lsrcaligned:	# return here when src is word-aligned
	srli	a12, a4, 2	# number of loop iterations with 4B per loop
	movi	a9, 3
	bnone	a11, a9, .Laligned
	j	.Ldstunaligned

.Lsrc1mod2:	# src address is odd
	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
	addi	a3, a3, 1		# advance src pointer
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned

.Lsrc2mod4:	# src address is 2 mod 4
	EX(l8ui, a9, a3, 0, fixup_l)	# get byte 0
	/* 1-cycle interlock */
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 0
	beqz	a9, .Lret		# if byte 0 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	beqz	a4, .Lret		# if len is zero
	EX(l8ui, a9, a3, 1, fixup_l)	# get byte 1
	addi	a3, a3, 2		# advance src pointer
	EX(s8i, a9, a11, 0, fixup_s)	# store byte 1
	beqz	a9, .Lret		# if byte 1 is zero
	addi	a11, a11, 1		# advance dst pointer
	addi	a4, a4, -1		# decrement len
	bnez	a4, .Lsrcaligned	# if len is nonzero
.Lret:
	sub	a2, a11, a2	# compute strlen
	retw

/*
 * dst is word-aligned, src is word-aligned
 */
	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Laligned:
#if XCHAL_HAVE_LOOPS
	loopnez	a12, .Loop1done
#else
	beqz	a12, .Loop1done
	slli	a12, a12, 2
	add	a12, a12, a11	# a12 = end of last 4B chunk
#endif
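	/*
	 * Word-at-a-time loop: each iteration loads a word, tests every
	 * byte lane for the terminator with bnone, and stores the word.
	 * With XCHAL_HAVE_LOOPS the loopnez above runs it a12 times at
	 * zero branch cost; otherwise a12 was turned into an end pointer
	 * for the blt at the bottom.
	 */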
.Loop1:
	EX(l32i, a9, a3, 0, fixup_l)	# get word from src
	addi	a3, a3, 4		# advance src pointer
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
	bnone	a9, a7, .Lz2		# if byte 2 is zero
	EX(s32i, a9, a11, 0, fixup_s)	# store word to dst
	bnone	a9, a8, .Lz3		# if byte 3 is zero
	addi	a11, a11, 4		# advance dst pointer
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Loop1
#endif

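	/*
	 * At most three bytes remain.  Bit 1 of the remaining length
	 * selects a 16-bit copy, bit 0 a final byte copy, each with the
	 * same terminator checks as the word loop.
	 */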
.Loop1done:
	bbci.l	a4, 1, .L100
	# copy 2 bytes
	EX(l16ui, a9, a3, 0, fixup_l)
	addi	a3, a3, 2		# advance src pointer
#ifdef __XTENSA_EB__
	bnone	a9, a7, .Lz0		# if first byte (bits 8..15) is zero
	bnone	a9, a8, .Lz1		# if second byte (bits 0..7) is zero
#else
	bnone	a9, a5, .Lz0		# if byte 0 is zero
	bnone	a9, a6, .Lz1		# if byte 1 is zero
#endif
	EX(s16i, a9, a11, 0, fixup_s)
	addi	a11, a11, 2		# advance dst pointer
.L100:
	bbci.l	a4, 0, .Lret
	EX(l8ui, a9, a3, 0, fixup_l)
	/* slot */
	EX(s8i, a9, a11, 0, fixup_s)
	beqz	a9, .Lret		# if byte is zero
	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
					# the effect of adding 3 in .Lz3 code
	/* fall thru to .Lz3 and "retw" */

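/*
 * Terminator handling: byte N of the loaded word was zero, so store
 * the bytes up to and including the NUL, adjust dst, and return the
 * length.  On big-endian the bytes of interest sit in the upper half
 * of a9, hence the extui/movi fixups before the narrow stores.
 */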
.Lz3:	# byte 3 is zero
	addi	a11, a11, 3		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw
.Lz0:	# byte 0 is zero
#ifdef __XTENSA_EB__
	movi	a9, 0
#endif /* __XTENSA_EB__ */
	EX(s8i, a9, a11, 0, fixup_s)
	sub	a2, a11, a2		# compute strlen
	retw
.Lz1:	# byte 1 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(s16i, a9, a11, 0, fixup_s)
	addi	a11, a11, 1		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw
.Lz2:	# byte 2 is zero
#ifdef __XTENSA_EB__
	extui	a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
	EX(s16i, a9, a11, 0, fixup_s)
	movi	a9, 0
	EX(s8i, a9, a11, 2, fixup_s)
	addi	a11, a11, 2		# advance dst pointer
	sub	a2, a11, a2		# compute strlen
	retw

	.align	4		# 1 mod 4 alignment for LOOPNEZ
	.byte	0		# (0 mod 4 alignment for LBEG)
.Ldstunaligned:
	/*
	 * for now just use byte copy loop
	 */
#if XCHAL_HAVE_LOOPS
	loopnez	a4, .Lunalignedend
#else
	beqz	a4, .Lunalignedend
	add	a12, a11, a4		# a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
	EX(l8ui, a9, a3, 0, fixup_l)
	addi	a3, a3, 1
	EX(s8i, a9, a11, 0, fixup_s)
	beqz	a9, .Lunalignedend
	addi	a11, a11, 1
#if !XCHAL_HAVE_LOOPS
	blt	a11, a12, .Lnextbyte
#endif

.Lunalignedend:
	sub	a2, a11, a2	# compute strlen
	retw


	.section .fixup, "ax"
	.align	4

	/* For now, just return -EFAULT.  Future implementations might
	 * like to clear remaining kernel space, like the fixup
	 * implementation in memset().  Thus, we differentiate between
	 * load/store fixups. */

fixup_s:
fixup_l:
	movi	a2, -EFAULT
	retw