#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions; these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

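/*
 * A minimal usage sketch (not part of this header): the classic
 * pattern for temporarily lifting the address limit so that kernel
 * buffers can be handed to code expecting __user pointers. The
 * do_read() call is a made-up stand-in for any such function.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_read(file, (char __user *)kbuf, len);
 *	set_fs(old_fs);
 *
 * The previous limit must always be restored; leaving KERNEL_DS in
 * place would let user-supplied pointers pass access_ok() unchecked.
 */
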
#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

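/*
 * Sketch of the kind of override an architecture might supply,
 * assuming (as USER_DS above does) that the segment value is an
 * inclusive upper limit. Illustrative only, not code any particular
 * architecture uses:
 *
 *	static inline int __access_ok(unsigned long addr,
 *				      unsigned long size)
 *	{
 *		unsigned long limit = get_fs().seg;
 *
 *		return size == 0 ||
 *		       (addr <= limit && size - 1 <= limit - addr);
 *	}
 *	#define __access_ok __access_ok
 */
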
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

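/*
 * Sketch of how an architecture's page-fault handler might consult
 * the table. search_exception_tables() is the generic kernel helper;
 * the regs->pc field name varies by architecture and is illustrative:
 *
 *	const struct exception_table_entry *fix;
 *
 *	fix = search_exception_tables(regs->pc);
 *	if (fix) {
 *		regs->pc = fix->fixup;	// resume at the fixup stub
 *		return;			// fault handled
 *	}
 *	// no fixup entry: the fault is a genuine bad access
 */
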
/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif

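/*
 * The __ variants above perform no access_ok() check; the caller is
 * expected to have validated the range once. A sketch with invented
 * names:
 *
 *	struct my_req req;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(req)))
 *		return -EFAULT;
 *	if (__copy_from_user(&req, uptr, sizeof(req)))
 *		return -EFAULT;
 */
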
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})

#define put_user(x, ptr) \
({ \
	void *__p = (ptr); \
	might_fault(); \
	access_ok(VERIFY_WRITE, __p, sizeof(*(ptr))) ? \
		__put_user((x), ((__typeof__(*(ptr)) *)__p)) : \
		-EFAULT; \
})

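/*
 * Typical put_user() use, with invented names: store one value at a
 * user-supplied address, returning 0 on success or -EFAULT.
 *
 *	static long my_get_version(u32 __user *argp)
 *	{
 *		return put_user((u32)MY_DRIVER_VERSION, argp);
 *	}
 */
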
#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr) \
({ \
	int __gu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 2: { \
		unsigned short __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 4: { \
		unsigned int __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 8: { \
		unsigned long long __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	default: \
		__get_user_bad(); \
		break; \
	} \
	__gu_err; \
})

#define get_user(x, ptr) \
({ \
	const void *__p = (ptr); \
	might_fault(); \
	access_ok(VERIFY_READ, __p, sizeof(*(ptr))) ? \
		__get_user((x), (__typeof__(*(ptr)) *)__p) : \
		((x) = (__typeof__(*(ptr)))0, -EFAULT); \
})

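/*
 * Typical get_user() use, again with invented names: fetch one value
 * from user space; on failure the destination is zeroed and -EFAULT
 * returned.
 *
 *	u32 val;
 *
 *	if (get_user(val, argp))	// argp is a u32 __user *
 *		return -EFAULT;
 */
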
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size_t n = __copy_from_user(x, ptr, size);
	if (unlikely(n)) {
		memset(x + (size - n), 0, n);
		return -EFAULT;
	}
	return 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = __copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}

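/*
 * Both routines return the number of bytes that could NOT be copied,
 * so any nonzero result is conventionally turned into -EFAULT. A
 * sketch with invented names:
 *
 *	struct my_params p;
 *
 *	if (copy_from_user(&p, uargp, sizeof(p)))
 *		return -EFAULT;
 *	// ... validate and update p ...
 *	if (copy_to_user(uargp, &p, sizeof(p)))
 *		return -EFAULT;
 */
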
/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

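/*
 * Illustrative call (buffer name invented): note that the generic
 * fallback above does not guarantee termination when the source
 * string fills the buffer, so the caller terminates explicitly.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (len < 0)
 *		return len;		// -EFAULT
 *	name[len] = '\0';
 */
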
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count. Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}

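/*
 * Sketch of the intended check, with an invented limit: a return of 0
 * means the pointer faulted, while a value greater than the limit
 * means no terminator was found within it.
 *
 *	long len = strnlen_user(ustr, MY_MAX_LEN);
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > MY_MAX_LEN)
 *		return -EINVAL;
 *	// len now counts the string including its terminator
 */
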
/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}

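/*
 * Illustrative call with invented names: zero a user buffer, treating
 * any uncleared remainder as a fault.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
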
#endif /* __ASM_GENERIC_UACCESS_H */