#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b) ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)
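
/*
 * Typical use of the macros above (illustrative only, not part of this
 * header): temporarily widen the "segment" so a kernel buffer can be
 * handed to a routine that expects a user pointer, then restore the
 * old value.  kernel_read_compat() and kbuf are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = kernel_read_compat((char __user *)kbuf, len);
 *	set_fs(old_fs);
 */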

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}
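
/*
 * The "addr += size; if (addr < size)" test above catches arithmetic
 * wrap-around: e.g. for addr = ~0UL and size = 2, addr + size wraps to
 * 1, which is smaller than size, so the range is rejected even though
 * the final "addr > limit" comparison alone would have passed.
 */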

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
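
/*
 * access_ok() can always succeed here: as the comment at the top of
 * this file explains, user memory is reached through the secondary ASI
 * and a completely separate VM map, so no user-supplied pointer can
 * alias kernel memory.  Bad accesses are caught by the fault handler
 * and the exception table below, not by an address-range check.
 */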

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};
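
/*
 * Each ".word 1b, 3b" pair emitted into __ex_table by the inline
 * assembly below becomes one of these entries: "insn" is the load or
 * store that may fault, "fixup" is the out-of-line recovery code.
 */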

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
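
/*
 * Example caller (illustrative only): the two values come back as the
 * macro result (0 or -EFAULT) and, for get_user(), through the first
 * argument.  uptr and val are hypothetical names.
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */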
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700127
128struct __large_struct { unsigned long buf[100]; };
129#define __m(x) ((struct __large_struct *)(x))
130
Michael S. Tsirkin4b636ba2015-01-06 23:29:43 +0200131#define __put_user_nocheck(data, addr, size) ({ \
132 register int __pu_ret; \
133 switch (size) { \
134 case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
135 case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
136 case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
137 case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
138 default: __pu_ret = __put_user_bad(); break; \
139 } \
140 __pu_ret; \
Michael S. Tsirkin71858202015-01-06 14:32:17 +0200141})
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700142
Michael S. Tsirkin71858202015-01-06 14:32:17 +0200143#define __put_user_asm(x, size, addr, ret) \
Sam Ravnborgf5e706a2008-07-17 21:55:51 -0700144__asm__ __volatile__( \
Michael S. Tsirkin71858202015-01-06 14:32:17 +0200145 "/* Put user asm, inline. */\n" \
146 "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
147 "clr %0\n" \
148 "2:\n\n\t" \
149 ".section .fixup,#alloc,#execinstr\n\t" \
150 ".align 4\n" \
151 "3:\n\t" \
152 "sethi %%hi(2b), %0\n\t" \
153 "jmpl %0 + %%lo(2b), %%g0\n\t" \
154 " mov %3, %0\n\n\t" \
155 ".previous\n\t" \
156 ".section __ex_table,\"a\"\n\t" \
157 ".align 4\n\t" \
158 ".word 1b, 3b\n\t" \
159 ".previous\n\n\t" \
160 : "=r" (ret) : "r" (x), "r" (__m(addr)), \
161 "i" (-EFAULT))
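
/*
 * Flow of the macro above: the "st<size>a" store at label 1 goes
 * through %asi (set up by set_fs()); if it completes, "clr %0" makes
 * the macro return 0.  If it faults, the exception table entry sends
 * control to label 3 in .fixup, which loads -EFAULT into the return
 * register and jumps back to label 2, just past the store.
 */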

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
	"/* Get user asm, inline. */\n"					\
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"				\
	"clr %0\n"							\
"2:\n\n\t"								\
	".section .fixup,#alloc,#execinstr\n\t"				\
	".align 4\n"							\
"3:\n\t"								\
	"sethi %%hi(2b), %0\n\t"					\
	"clr %1\n\t"							\
	"jmpl %0 + %%lo(2b), %%g0\n\t"					\
	" mov %3, %0\n\n\t"						\
	".previous\n\t"							\
	".section __ex_table,\"a\"\n\t"					\
	".align 4\n\t"							\
	".word 1b, 3b\n\n\t"						\
	".previous\n\t"							\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),			\
	  "i" (-EFAULT))
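
/*
 * Note the one difference from __put_user_asm(): the fixup path also
 * executes "clr %1", so a faulting get_user() hands back 0 rather than
 * leaving uninitialized data in the destination.
 */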

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret;

	if (!__builtin_constant_p(size))
		check_object_size(to, size, false);

	ret = ___copy_from_user(to, from, size);
	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
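
/*
 * As with the generic version, copy_from_user() returns the number of
 * bytes that could NOT be copied, so the usual caller pattern is
 * (illustrative only, kbuf/ubuf are hypothetical):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */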

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret;

	if (!__builtin_constant_p(size))
		check_object_size(from, size, true);
	ret = ___copy_to_user(to, from, size);
	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */