/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_UACCESS_H
#define _ASM_MICROBLAZE_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h> /* RLIMIT_FSIZE */
#include <linux/mm.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/string.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * On Microblaze the fs value is actually the top of the corresponding
 * address space.
 *
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * On a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
 */
# define MAKE_MM_SEG(s)		((mm_segment_t) { (s) })

# ifndef CONFIG_MMU
#  define KERNEL_DS	MAKE_MM_SEG(0)
#  define USER_DS	KERNEL_DS
# else
#  define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#  define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
# endif

# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

# define segment_eq(a, b)	((a).seg == (b).seg)
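
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the address limit so that the user-access helpers
 * below accept kernel pointers. The surrounding caller is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... use copy_from_user()/copy_to_user() on kernel buffers ...
 *	set_fs(old_fs);
 */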

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};
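
/*
 * For illustration only: each ".word 1b,3b" directive emitted by the macros
 * below becomes one exception_table_entry, e.g.
 *
 *	"1:	lbu	%1, %2, r0;"	<- insn:  the access that may fault
 *	"3:	brid	2b;"		<- fixup: out-of-line code that sets -EFAULT
 *
 * so the entry is { .insn = address of 1:, .fixup = address of 3: }, and the
 * fault handler uses search_exception_table() to map a faulting PC to its
 * fixup address.
 */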

/* Returns 0 if the exception is not found, and the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);

#ifndef CONFIG_MMU

/* Check against bounds of physical memory */
static inline int ___range_ok(unsigned long addr, unsigned long size)
{
	return ((addr < memory_start) ||
		((addr + size - 1) > (memory_start + memory_size - 1)));
}

#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)

#else

static inline int access_ok(int type, const void __user *addr,
			unsigned long size)
{
	if (!size)
		goto ok;

	if ((get_fs().seg < ((unsigned long)addr)) ||
			(get_fs().seg < ((unsigned long)addr + size - 1))) {
		pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
			type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
			(u32)get_fs().seg);
		return 0;
	}
ok:
	pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n",
		type ? "WRITE" : "READ ", (__force u32)addr, (u32)size,
		(u32)get_fs().seg);
	return 1;
}
#endif
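
/*
 * Illustrative sketch, not part of this header: access_ok() only validates
 * that the range lies below the current address limit (or inside physical
 * memory on non-MMU); it never touches the memory itself, so a hypothetical
 * caller still relies on the faulting accessors afterwards:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */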

#ifdef CONFIG_MMU
# define __FIXUP_SECTION	".section .fixup,\"ax\"\n"
# define __EX_TABLE_SECTION	".section __ex_table,\"a\"\n"
#else
# define __FIXUP_SECTION	".section .discard,\"ax\"\n"
# define __EX_TABLE_SECTION	".section .discard,\"ax\"\n"
#endif

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

/* Return: the number of bytes not copied, i.e. 0 on success, non-zero on failure. */
static inline unsigned long __must_check __clear_user(void __user *to,
							unsigned long n)
{
	/* normal memset(), with two words added to __ex_table */
	__asm__ __volatile__ (					\
			"1:	sb	r0, %1, r0;"		\
			"	addik	%0, %0, -1;"		\
			"	bneid	%0, 1b;"		\
			"	addik	%1, %1, 1;"		\
			"2:			"		\
			__EX_TABLE_SECTION			\
			".word	1b,2b;"				\
			".previous;"				\
		: "=r"(n), "=r"(to)				\
		: "0"(n), "1"(to)
	);
	return n;
}

static inline unsigned long __must_check clear_user(void __user *to,
						    unsigned long n)
{
	might_fault();
	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
		return n;

	return __clear_user(to, n);
}
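
/*
 * Illustrative sketch (hypothetical caller): zero a user-space buffer and
 * treat any bytes left uncleared as a fault.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */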

/* put_user and get_user macros */
extern long __user_bad(void);

#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
			"1:" insn " %1, %2, r0;"		\
			"	addk	%0, r0, r0;"		\
			"2: "					\
			__FIXUP_SECTION				\
			"3:	brid	2b;"			\
			"	addik	%0, r0, %3;"		\
			".previous;"				\
			__EX_TABLE_SECTION			\
			".word	1b,3b;"				\
			".previous;"				\
		: "=&r"(__gu_err), "=r"(__gu_val)		\
		: "r"(__gu_ptr), "i"(-EFAULT)			\
	);							\
})

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user_check(x, ptr, size)					\
({									\
	unsigned long __gu_val = 0;					\
	const typeof(*(ptr)) __user *__gu_addr = (ptr);			\
	int __gu_err = 0;						\
									\
	if (access_ok(VERIFY_READ, __gu_addr, size)) {			\
		switch (size) {						\
		case 1:							\
			__get_user_asm("lbu", __gu_addr, __gu_val,	\
				       __gu_err);			\
			break;						\
		case 2:							\
			__get_user_asm("lhu", __gu_addr, __gu_val,	\
				       __gu_err);			\
			break;						\
		case 4:							\
			__get_user_asm("lw", __gu_addr, __gu_val,	\
				       __gu_err);			\
			break;						\
		default:						\
			__gu_err = __user_bad();			\
			break;						\
		}							\
	} else {							\
		__gu_err = -EFAULT;					\
	}								\
	x = (__force typeof(*(ptr)))__gu_val;				\
	__gu_err;							\
})
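
/*
 * Illustrative sketch, not part of this header: reading one int through a
 * hypothetical user pointer with the checked accessor above.
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */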

#define __get_user(x, ptr)						\
({									\
	unsigned long __gu_val = 0;					\
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
	long __gu_err;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
		break;							\
	default:							\
		/* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
	}								\
	x = (__force __typeof__(*(ptr))) __gu_val;			\
	__gu_err;							\
})


#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
			"1:" insn " %1, %2, r0;"		\
			"	addk	%0, r0, r0;"		\
			"2: "					\
			__FIXUP_SECTION				\
			"3:	brid	2b;"			\
			"	addik	%0, r0, %3;"		\
			".previous;"				\
			__EX_TABLE_SECTION			\
			".word	1b,3b;"				\
			".previous;"				\
		: "=&r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})

#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)		\
({								\
	__asm__ __volatile__ ("	lwi	%0, %1, 0;"		\
			"1:	swi	%0, %2, 0;"		\
			"	lwi	%0, %1, 4;"		\
			"2:	swi	%0, %2, 4;"		\
			"	addk	%0, r0, r0;"		\
			"3:			"		\
			__FIXUP_SECTION				\
			"4:	brid	3b;"			\
			"	addik	%0, r0, %3;"		\
			".previous;"				\
			__EX_TABLE_SECTION			\
			".word	1b,4b,2b,4b;"			\
			".previous;"				\
		: "=&r"(__gu_err)				\
		: "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
		);						\
})

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

#define __put_user_check(x, ptr, size)					\
({									\
	typeof(*(ptr)) volatile __pu_val = x;				\
	typeof(*(ptr)) __user *__pu_addr = (ptr);			\
	int __pu_err = 0;						\
									\
	if (access_ok(VERIFY_WRITE, __pu_addr, size)) {			\
		switch (size) {						\
		case 1:							\
			__put_user_asm("sb", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		case 2:							\
			__put_user_asm("sh", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		case 4:							\
			__put_user_asm("sw", __pu_addr, __pu_val,	\
				       __pu_err);			\
			break;						\
		case 8:							\
			__put_user_asm_8(__pu_addr, __pu_val, __pu_err);\
			break;						\
		default:						\
			__pu_err = __user_bad();			\
			break;						\
		}							\
	} else {							\
		__pu_err = -EFAULT;					\
	}								\
	__pu_err;							\
})
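
/*
 * Illustrative sketch (hypothetical caller and pointer): writing one value
 * back to user space with the checked accessor above.
 *
 *	int __user *uresult = ...;
 *
 *	if (put_user(status, uresult))
 *		return -EFAULT;
 */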

#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) volatile __gu_val = (x);			\
	long __gu_err = 0;						\
	switch (sizeof(__gu_val)) {					\
	case 1:								\
		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 8:								\
		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
		break;							\
	default:							\
		/*__gu_err = -EINVAL;*/ __gu_err = __user_bad();	\
	}								\
	__gu_err;							\
})


/* copy_to_from_user */
#define __copy_from_user(to, from, n)	\
	__copy_tofrom_user((__force void __user *)(to), \
				(void __user *)(from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		__copy_from_user((to), (from), (n))

static inline long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = __copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

#define __copy_to_user(to, from, n)	\
		__copy_tofrom_user((void __user *)(to), \
			(__force const void __user *)(from), (n))
#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	return n;
}
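
/*
 * Illustrative sketch, not part of this header: both copy helpers return the
 * number of bytes that could not be copied, so zero means success. The buffer
 * names are hypothetical.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */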

/*
 * Copy a null-terminated string from user space.
 */
extern int __strncpy_user(char *to, const char __user *from, int len);

#define __strncpy_from_user	__strncpy_user

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the terminating 0).
 *
 * Return 0 on exception, or a value greater than the given limit if the
 * string is too long.
 */
extern int __strnlen_user(const char __user *sstr, int len);

static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}
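
/*
 * Illustrative sketch (hypothetical caller and buffer): pulling a
 * NUL-terminated name in from user space, rejecting overlong or faulting
 * input.
 *
 *	char name[32];
 *
 *	if (strnlen_user(uname, sizeof(name)) > sizeof(name))
 *		return -EINVAL;
 *	if (strncpy_from_user(name, uname, sizeof(name)) < 0)
 *		return -EFAULT;
 */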

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_MICROBLAZE_UACCESS_H */