blob: 9c80cd515b2069cab1a28b2b54a16f70d9a19028 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef __M68K_UACCESS_H
2#define __M68K_UACCESS_H
3
4/*
5 * User space memory access functions
6 */
Roman Zippeld94af932006-06-23 02:05:00 -07007#include <linux/compiler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include <linux/errno.h>
Roman Zippeld94af932006-06-23 02:05:00 -07009#include <linux/types.h>
Andrew Mortona0f7b67a2007-01-29 13:19:50 -080010#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <asm/segment.h>
12
/* Legacy access_ok() 'type' values; the m68k access_ok() below ignores them. */
#define VERIFY_READ	0
#define VERIFY_WRITE	1
15
/*
 * We let the MMU do all checking: a bad user-space access simply
 * faults, and the exception tables below take care of recovery.
 * Hence this always succeeds; the arguments exist only to satisfy
 * the generic access_ok() interface.
 */
static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	return 1;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
Linus Torvalds1da177e2005-04-16 15:20:36 -070023/*
 * Not all variants of the 68k family support the notion of address spaces.
25 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
26 * the "moves" instruction to access user space from kernel space. Other
27 * family members like ColdFire don't support this, and only have a single
28 * address space, and use the usual "move" instruction for user space access.
29 *
30 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define what we need to use.
32 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define	MOVES	"moves"		/* classic 680x0: "moves" honours sfc/dfc */
#else
#define	MOVES	"move"		/* single address space (e.g. ColdFire) */
#endif
38
39/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070040 * The exception table consists of pairs of addresses: the first is the
41 * address of an instruction that is allowed to fault, and the second is
42 * the address at which the program should continue. No registers are
43 * modified, so it is entirely up to the continuation code to figure out
44 * what to do.
45 *
46 * All the routines below use bits of fixup code that are out of line
47 * with the main instruction path. This means when everything is well,
48 * we don't even have to jump over them. Further, they do not intrude
49 * on our cache or tlb entries.
50 */
51
struct exception_table_entry
{
	/* insn: address of the insn allowed to fault; fixup: resume address */
	unsigned long insn, fixup;
};
56
/*
 * No definitions are visible for these; presumably they are left
 * undefined so that an unsupported transfer size is caught as an
 * unresolved reference at link time -- TODO confirm no definition
 * exists elsewhere in the tree.
 */
extern int __put_user_bad(void);
extern int __get_user_bad(void);
59
/*
 * __put_user_asm - store one value of size #bwl (b/w/l) to user space.
 * @res: int error accumulator ("+d"): set to @err by the fixup if the
 *       store faults, otherwise left with its prior value (the fixup
 *       path at 10: is the only writer).
 * @x:   value to store, passed via constraint class #reg ("d" or "r").
 * @ptr: user-space destination ("=m").
 *
 * A fault on the store at 1: vectors through the __ex_table entry to the
 * out-of-line fixup at 10:, which loads @err into @res and jumps back to
 * 2:.  The extra 2b,10b entry maps the completion address as well --
 * NOTE(review): presumably for faults reported with the PC already past
 * the instruction; confirm against the m68k fault handler.
 */
#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
asm volatile ("\n"					\
	"1:	"MOVES"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/*
79 * These are the main single-value transfer routines. They automatically
80 * use the right size if we just have the right pointer type.
81 */
82
/*
 * __put_user - transfer one value to user space, sized by the pointer type.
 * Evaluates to 0 on success or -EFAULT if the store faults.
 * 1/2/4-byte values go through __put_user_asm; the 8-byte case is
 * open-coded as a pair of MOVES.l stores (the %R2 operand modifier names
 * the second register of the 64-bit register pair).  Any other size
 * references __put_user_bad(), flagging the misuse.
 */
#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT);	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
		break;							\
	case 8:								\
	    {								\
		const void __user *__pu_ptr = (ptr);			\
		asm volatile ("\n"					\
			"1:	"MOVES".l	%2,(%1)+\n"		\
			"2:	"MOVES".l	%R2,(%1)\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	movel %3,%0\n"				\
			"	jra 3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align 4\n"				\
			"	.long 1b,10b\n"				\
			"	.long 2b,10b\n"				\
			"	.long 3b,10b\n"				\
			"	.previous"				\
			: "+d" (__pu_err), "+a" (__pu_ptr)		\
			: "r" (__pu_val), "i" (-EFAULT)			\
			: "memory");					\
		break;							\
	    }								\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
/* access_ok() always succeeds here, so put_user() needs no extra check. */
#define put_user(x, ptr)	__put_user(x, ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130
/*
 * __get_user_asm - load one value of size #bwl (b/w/l) from user space.
 * The value is read into a temporary of integer 'type' using constraint
 * class #reg, then converted via unsigned long before being assigned to
 * 'x' with the pointer's own type.
 * On a fault the fixup at 10: stores 'err' into 'res' and zeroes the
 * destination register (sub.l %1,%1) before resuming at 2:, so a failed
 * __get_user() leaves a zero in the destination.
 */
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({	\
	type __gu_val;						\
	asm volatile ("\n"					\
		"1:	"MOVES"."#bwl"	%2,%1\n"		\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	move.l	%3,%0\n"			\
		"	sub.l	%1,%1\n"			\
		"	jra	2b\n"				\
		"	.previous\n"				\
		"\n"						\
		"	.section __ex_table,\"a\"\n"		\
		"	.align	4\n"				\
		"	.long	1b,10b\n"			\
		"	.previous"				\
		: "+d" (res), "=&" #reg (__gu_val)		\
		: "m" (*(ptr)), "i" (err));			\
	(x) = (typeof(*(ptr)))(unsigned long)__gu_val;		\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151
/*
 * __get_user - fetch one value from user space, sized by the pointer type.
 * Evaluates to 0 on success or -EFAULT if the load faults (the asm fixup
 * zeroes the destination in that case).  The 8-byte case is deliberately
 * commented out because of the gcc-4.1 typeof breakage noted below, so
 * 64-bit fetches hit __get_user_bad() like any unsupported size.
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT);	\
		break;							\
	case 4:								\
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
		break;							\
/*	case 8: disabled because gcc-4.1 has a broken typeof		\
	    {								\
		const void *__gu_ptr = (ptr);				\
		u64 __gu_val;						\
		asm volatile ("\n"					\
			"1:	"MOVES".l	(%2)+,%1\n"		\
			"2:	"MOVES".l	(%2),%R1\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	move.l	%3,%0\n"			\
			"	sub.l	%1,%1\n"			\
			"	sub.l	%R1,%R1\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.previous"				\
			: "+d" (__gu_err), "=&r" (__gu_val),		\
			  "+a" (__gu_ptr)				\
			: "i" (-EFAULT)					\
			: "memory");					\
		(x) = (typeof(*(ptr)))__gu_val;				\
		break;							\
	    }	*/							\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})
/* access_ok() always succeeds here, so get_user() needs no extra check. */
#define get_user(x, ptr) __get_user(x, ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
/*
 * Out-of-line bulk copies, used for sizes the inlined versions below
 * don't handle.  Defined elsewhere; per the usual copy_*_user contract
 * they return the number of bytes NOT copied (0 on success).
 */
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700204
/*
 * __constant_copy_from_user_asm - copy a small constant-sized block from
 * user space as two or three loads of sizes #s1/#s2/#s3 (#s3 may be
 * empty; the assembler .ifnc conditionals drop that leg entirely).
 * Each user-space load (1:, 2:, 3:) bounces through 'tmp' and a plain
 * "move" store into the kernel buffer.  On a fault the fixup chain
 * (10:/20:/30:) zero-fills the not-yet-written part of the destination
 * with clr instructions, sets 'res' to the total byte count #n and
 * resumes at 4:.
 * NOTE(review): 'res' becomes the full #n even when leading bytes did
 * copy (e.g. a fault at 2: still yields res == n) -- confirm callers
 * intend "any fault => n bytes reported uncopied".
 */
#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	clr."#s1"	(%1)+\n"			\
		"20:	clr."#s2"	(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	clr."#s3"	(%1)+\n"			\
		"	.endif\n"					\
		"	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")
237
/*
 * __constant_copy_from_user - inlined copy for compile-time-constant n.
 * Sizes 1/2/4 use a single __get_user_asm (note its 'err' argument is
 * the byte count, so 'res' follows the bytes-not-copied convention);
 * sizes 3/5/6/7/8/9/10/12 decompose into at most three moves via
 * __constant_copy_from_user_asm; everything else (including 11 and
 * anything larger than 12) falls back to __generic_copy_from_user().
 * Returns the number of bytes not copied, 0 on success.
 */
static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
		break;
	case 2:
		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}
284
/*
 * __constant_copy_to_user_asm - copy a small constant-sized block to
 * user space as two or three stores of sizes #s1/#s2/#s3 (#s3 may be
 * empty; the .ifnc conditionals drop that leg).  Each step loads from
 * the kernel buffer into 'tmp' with a plain "move" and stores to user
 * space with MOVES (labels 11:/21:/31:; 12:/22:/32: mark the following
 * instruction boundaries, which the exception table also covers).
 * On a fault the single fixup at 5: sets 'res' to the total byte count
 * #n and resumes at 4:; unlike the from_user variant there is nothing
 * to zero-fill, since the destination is user memory.
 */
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")
318
/*
 * __constant_copy_to_user - inlined copy for compile-time-constant n.
 * Mirrors __constant_copy_from_user: sizes 1/2/4 use one __put_user_asm
 * (with the byte count as its 'err' argument, preserving the
 * bytes-not-copied convention); sizes 3/5/6/7/8/9/10/12 decompose into
 * at most three moves; anything else falls back to
 * __generic_copy_to_user().  Returns bytes not copied, 0 on success.
 */
static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}
365
/*
 * Dispatch: compile-time-constant sizes take the inlined switch above,
 * variable sizes the out-of-line generic routines.
 */
#define __copy_from_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_from_user(to, from, n) :	\
 __generic_copy_from_user(to, from, n))

#define __copy_to_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_to_user(to, from, n) :		\
 __generic_copy_to_user(to, from, n))

/* The plain variants do no extra work here (no might_sleep/might_fault
 * hooks visible), so the _inatomic forms can alias them directly. */
#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user

/* access_ok() always succeeds, so checked == unchecked on this arch. */
#define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
#define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381
/* Out-of-line user-space string and memory-clearing helpers (defined
 * elsewhere). */
long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);
unsigned long __clear_user(void __user *to, unsigned long n);

/* No access checking needed (see access_ok()), so the checked name maps
 * straight onto the unchecked implementation. */
#define clear_user	__clear_user

/* Legacy strlen_user(): strnlen_user() bounded by an arbitrary 32767
 * byte limit. */
#define strlen_user(str) strnlen_user(str, 32767)
389
#endif /* __M68K_UACCESS_H */