/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2007 John Williams
 *
 * Reasonably optimised generic C code for memmove on MicroBlaze.
 * This is generic C code to do efficient, alignment-aware memmove.
 *
 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
 *
 * Attempts were made, unsuccessfully, to contact the original
 * author of this code (Michael Morrow, Intel). Below is the original
 * copyright notice.
 *
 * This software has been developed by Intel Corporation.
 * Intel specifically disclaims all warranties, express or
 * implied, and all liability, including consequential and
 * other indirect damages, for the use of this program, including
 * liability for infringement of any proprietary rights,
 * and including the warranties of merchantability and fitness
 * for a particular purpose. Intel does not assume any
 * responsibility for any errors which may appear in this program
 * nor any responsibility to update it.
 */

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/string.h>

#ifdef __HAVE_ARCH_MEMMOVE
#ifndef CONFIG_OPT_LIB_FUNCTION
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;

	if (!c)
		return v_dst;

	/* Use memcpy when source is higher than dest */
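	/*
	 * When the destination starts at or below the source, an ascending
	 * copy never overwrites source bytes that have not been read yet,
	 * so deferring to memcpy() is safe.
	 */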
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);

	/* copy backwards, from end to beginning */
	src += c;
	dst += c;

	/* Simple, byte oriented memmove. */
	while (c--)
		*--dst = *--src;

	return v_dst;
}
#else /* CONFIG_OPT_LIB_FUNCTION */
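/*
 * With CONFIG_OPT_LIB_FUNCTION enabled, the byte-at-a-time loop above is
 * replaced by the word-at-a-time implementation below, which aligns the
 * destination first and then copies 32 bits per iteration.
 */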
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;
	const uint32_t *i_src;
	uint32_t *i_dst;

	if (!c)
		return v_dst;

	/* Use memcpy when source is higher than dest */
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);

	/* The following code tries to optimize the copy by using unsigned
	 * alignment. This will work fine if both source and destination are
	 * aligned on the same boundary. However, if they are aligned on
	 * different boundaries shifts will be necessary. This might result in
	 * bad performance on MicroBlaze systems without a barrel shifter.
	 */
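	/*
	 * Note: the shift-and-merge scheme in the unaligned cases below
	 * assumes big-endian byte order. Each destination word is built
	 * from two adjacent source words: the 1-3 bytes carried over in
	 * buf_hold from the previously loaded (higher-addressed) word,
	 * merged with the remaining bytes of the newly loaded word. Only
	 * the shift amounts differ between the off-by-1/2/3 cases.
	 */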
	/* FIXME this part needs more test */
	/* Do a descending copy - this is a bit trickier! */
	dst += c;
	src += c;

	if (c >= 4) {
		unsigned value, buf_hold;

		/* Align the destination to a word boundary. */
		/* This is done in an endian independent manner. */

		switch ((unsigned long)dst & 3) {
		case 3:
			*--dst = *--src;
			--c;
		case 2:
			*--dst = *--src;
			--c;
		case 1:
			*--dst = *--src;
			--c;
		}
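		/*
		 * The cases above fall through intentionally: depending on
		 * (dst & 3), one to three trailing bytes are copied so that
		 * dst is word aligned for the loops below.
		 */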

		i_dst = (void *)dst;
		/* Choose a copy scheme based on the source */
		/* alignment relative to destination. */
		switch ((unsigned long)src & 3) {
		case 0x0:	/* Both byte offsets are aligned */

			i_src = (const void *)src;

			for (; c >= 4; c -= 4)
				*--i_dst = *--i_src;

			src = (const void *)i_src;
			break;
		case 0x1:	/* Unaligned - Off by 1 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);
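			/*
			 * src is rounded up to the next word boundary here,
			 * so the pre-decremented load below fetches the
			 * aligned word holding the bytes just below src.
			 */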

			/* Load the holding buffer */
			buf_hold = *--i_src >> 24;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold | value << 8;
				buf_hold = value >> 24;
			}

			/* Realign the source */
			src = (const void *)i_src;
			src += 1;
			break;
		case 0x2:	/* Unaligned - Off by 2 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);

			/* Load the holding buffer */
			buf_hold = *--i_src >> 16;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold | value << 16;
				buf_hold = value >> 16;
			}

			/* Realign the source */
			src = (const void *)i_src;
			src += 2;
			break;
		case 0x3:	/* Unaligned - Off by 3 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);

			/* Load the holding buffer */
			buf_hold = *--i_src >> 8;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold | value << 24;
				buf_hold = value >> 8;
			}

			/* Realign the source */
			src = (const void *)i_src;
			src += 3;
			break;
		}
		dst = (void *)i_dst;
	}

	/* simple fast copy, ... unless a cache boundary is crossed */
	/* Finish off any remaining bytes */
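	/*
	 * The cases below fall through on purpose, copying the remaining
	 * 0-3 bytes one at a time. (The word loops above always leave c
	 * below 4, so the case 4 label is effectively never taken.)
	 */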
	switch (c) {
	case 4:
		*--dst = *--src;
	case 3:
		*--dst = *--src;
	case 2:
		*--dst = *--src;
	case 1:
		*--dst = *--src;
	}
	return v_dst;
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memmove);
#endif /* __HAVE_ARCH_MEMMOVE */