blob: 79330af9a5f85442745110001defbaa2a1964bb8 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * S390 version
Heiko Carstensa53c8fa2012-07-20 11:15:04 +02003 * Copyright IBM Corp. 1999, 2000
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 * Author(s): Hartmut Penner (hp@de.ibm.com),
5 * Martin Schwidefsky (schwidefsky@de.ibm.com)
6 *
7 * Derived from "include/asm-i386/uaccess.h"
8 */
9#ifndef __S390_UACCESS_H
10#define __S390_UACCESS_H
11
12/*
13 * User space memory access functions
14 */
15#include <linux/sched.h>
16#include <linux/errno.h>
David Howellsa0616cd2012-03-28 18:30:02 +010017#include <asm/ctl_reg.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070018
/* access_ok() 'type' argument values; the type is ignored on s390. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

/* Wrap a plain integer in the mm_segment_t struct type. */
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })


#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)

/* get_ds() always names the kernel segment; get_fs() reads the
 * per-thread segment stored in current->thread.mm_segment. */
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)
39
/*
 * Switch the current thread's address-space mode.  Stores the new
 * segment in current->thread.mm_segment, then installs the matching
 * address-space-control element (user_asce for USER_DS — the ar4
 * member is nonzero — kernel_asce otherwise) into control register 7
 * via __ctl_load().
 */
#define set_fs(x) \
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

/* Two segments are equal iff their ar4 selector bits match. */
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
50
/*
 * Range checking is a no-op on s390: user and kernel memory are kept
 * apart by separate address spaces rather than by an address limit,
 * so every (addr, size) pair is reported as acceptable.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
55
/*
 * __access_ok(): run the pointer through __chk_user_ptr() (a static
 * analysis hook — see <linux/compiler.h>) and then through
 * __range_ok(), which on s390 accepts everything.
 */
#define __access_ok(addr, size) \
({								\
	__chk_user_ptr(addr);					\
	__range_ok((unsigned long)(addr), (size));		\
})

/* The 'type' (VERIFY_READ/VERIFY_WRITE) argument is ignored. */
#define access_ok(type, addr, size) __access_ok(addr, size)
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/*
65 * The exception table consists of pairs of addresses: the first is the
66 * address of an instruction that is allowed to fault, and the second is
67 * the address at which the program should continue. No registers are
68 * modified, so it is entirely up to the continuation code to figure out
69 * what to do.
70 *
71 * All the routines below use bits of fixup code that are out of line
72 * with the main instruction path. This means when everything is well,
73 * we don't even have to jump over them. Further, they do not intrude
74 * on our cache or tlb entries.
75 */
76
/*
 * Exception table entry with relative addressing: each member holds a
 * signed 32-bit displacement from that member's own address to the
 * target, which halves the table size compared to absolute pointers.
 */
struct exception_table_entry
{
	int insn, fixup;
};

/* Resolve the relative 'insn' member to the absolute instruction address. */
static inline unsigned long extable_insn(const struct exception_table_entry *x)
{
	unsigned long anchor = (unsigned long)&x->insn;

	return anchor + x->insn;
}

/* Resolve the relative 'fixup' member to the absolute continuation address. */
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	unsigned long anchor = (unsigned long)&x->fixup;

	return anchor + x->fixup;
}

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
94
/*
 * Dispatch table for the user-space access primitives.  Note the
 * argument order: the length always comes first.  The copy and clear
 * operations return the number of bytes that could NOT be transferred
 * (0 means complete success) — see the kernel-doc on the wrapper
 * functions later in this header.
 */
struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
};

/* Active implementation used by the wrappers below; presumably chosen
 * at boot from the two variants (definitions live outside this header). */
extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_mvcos;	/* NOTE(review): MVCOS-based, judging by name */
extern struct uaccess_ops uaccess_pt;		/* NOTE(review): page-table based, judging by name */

extern int __handle_fault(unsigned long, unsigned long, int);
111
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200112static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
113{
Martin Schwidefskye258d712013-09-24 09:14:56 +0200114 size = uaccess.copy_to_user(size, ptr, x);
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200115 return size ? -EFAULT : size;
116}
117
118static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
119{
Martin Schwidefskye258d712013-09-24 09:14:56 +0200120 size = uaccess.copy_from_user(size, ptr, x);
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200121 return size ? -EFAULT : size;
122}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */

/*
 * Store x at user address ptr without an access_ok() check.  Only
 * 1/2/4/8-byte objects are supported; any other size is rejected via
 * __put_user_bad().  Evaluates x once into a typed temporary so side
 * effects are not repeated.  Result is 0 on success or -EFAULT.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
/* Checked variant of __put_user(): announces that a fault (and hence
 * sleeping) may occur via might_fault() before doing the store. */
#define put_user(x, ptr) \
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


/* Declared but (presumably) never defined, so that a __put_user() on an
 * unsupported size fails at link time rather than silently. */
extern int __put_user_bad(void) __attribute__((noreturn));
156
/*
 * Fetch *ptr from user space into x without an access_ok() check.
 * Each supported size (1/2/4/8 bytes) reads into a correspondingly
 * sized unsigned temporary via __get_user_fn() and then reinterprets
 * those bytes as the pointer's target type before assigning to x;
 * unsupported sizes are rejected through __get_user_bad().  Result is
 * 0 on success or -EFAULT.
 */
#define __get_user(x, ptr) \
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
/* Checked variant of __get_user(): announces that a fault (and hence
 * sleeping) may occur via might_fault() before doing the load. */
#define get_user(x, ptr) \
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

/* Declared but (presumably) never defined, so that a __get_user() on an
 * unsupported size fails at link time rather than silently. */
extern int __get_user_bad(void) __attribute__((noreturn));

/* s390 handles unaligned accesses; the unaligned variants are aliases. */
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208/**
209 * __copy_to_user: - Copy a block of data into user space, with less checking.
210 * @to: Destination address, in user space.
211 * @from: Source address, in kernel space.
212 * @n: Number of bytes to copy.
213 *
214 * Context: User context only. This function may sleep.
215 *
216 * Copy data from kernel space to user space. Caller must check
217 * the specified block with access_ok() before calling this function.
218 *
219 * Returns number of bytes that could not be copied.
220 * On success, this will be zero.
221 */
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100222static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223__copy_to_user(void __user *to, const void *from, unsigned long n)
224{
Martin Schwidefskye258d712013-09-24 09:14:56 +0200225 return uaccess.copy_to_user(n, to, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226}
227
228#define __copy_to_user_inatomic __copy_to_user
229#define __copy_from_user_inatomic __copy_from_user
230
231/**
232 * copy_to_user: - Copy a block of data into user space.
233 * @to: Destination address, in user space.
234 * @from: Source address, in kernel space.
235 * @n: Number of bytes to copy.
236 *
237 * Context: User context only. This function may sleep.
238 *
239 * Copy data from kernel space to user space.
240 *
241 * Returns number of bytes that could not be copied.
242 * On success, this will be zero.
243 */
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100244static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245copy_to_user(void __user *to, const void *from, unsigned long n)
246{
Heiko Carstensdab4079d2009-06-12 10:26:32 +0200247 might_fault();
Heiko Carstensd12a2972013-02-21 16:57:42 +0100248 return __copy_to_user(to, from, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249}
250
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251/**
252 * __copy_from_user: - Copy a block of data from user space, with less checking.
253 * @to: Destination address, in kernel space.
254 * @from: Source address, in user space.
255 * @n: Number of bytes to copy.
256 *
257 * Context: User context only. This function may sleep.
258 *
259 * Copy data from user space to kernel space. Caller must check
260 * the specified block with access_ok() before calling this function.
261 *
262 * Returns number of bytes that could not be copied.
263 * On success, this will be zero.
264 *
265 * If some data could not be copied, this function will pad the copied
266 * data to the requested size using zero bytes.
267 */
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100268static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269__copy_from_user(void *to, const void __user *from, unsigned long n)
270{
Martin Schwidefskye258d712013-09-24 09:14:56 +0200271 return uaccess.copy_from_user(n, from, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272}
273
/*
 * Diagnostic hook called by copy_from_user() when the compiler can
 * prove the destination object is smaller than the requested length;
 * with CONFIG_DEBUG_STRICT_USER_COPY_CHECKS any reference to it also
 * emits a compile-time warning.
 */
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;
279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280/**
281 * copy_from_user: - Copy a block of data from user space.
282 * @to: Destination address, in kernel space.
283 * @from: Source address, in user space.
284 * @n: Number of bytes to copy.
285 *
286 * Context: User context only. This function may sleep.
287 *
288 * Copy data from user space to kernel space.
289 *
290 * Returns number of bytes that could not be copied.
291 * On success, this will be zero.
292 *
293 * If some data could not be copied, this function will pad the copied
294 * data to the requested size using zero bytes.
295 */
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100296static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297copy_from_user(void *to, const void __user *from, unsigned long n)
298{
Heiko Carstens1dcec252010-02-26 22:37:22 +0100299 unsigned int sz = __compiletime_object_size(to);
300
Heiko Carstensdab4079d2009-06-12 10:26:32 +0200301 might_fault();
Heiko Carstens1dcec252010-02-26 22:37:22 +0100302 if (unlikely(sz != -1 && sz < n)) {
303 copy_from_user_overflow();
304 return n;
305 }
Heiko Carstensd12a2972013-02-21 16:57:42 +0100306 return __copy_from_user(to, from, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100309static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310__copy_in_user(void __user *to, const void __user *from, unsigned long n)
311{
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200312 return uaccess.copy_in_user(n, to, from);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100315static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316copy_in_user(void __user *to, const void __user *from, unsigned long n)
317{
Heiko Carstensdab4079d2009-06-12 10:26:32 +0200318 might_fault();
Heiko Carstensd12a2972013-02-21 16:57:42 +0100319 return __copy_in_user(to, from, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320}
321
322/*
323 * Copy a null terminated string from userspace.
324 */
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100325static inline long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326strncpy_from_user(char *dst, const char __user *src, long count)
327{
Heiko Carstensdab4079d2009-06-12 10:26:32 +0200328 might_fault();
Heiko Carstensd12a2972013-02-21 16:57:42 +0100329 return uaccess.strncpy_from_user(count, src, dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330}
331
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332static inline unsigned long
333strnlen_user(const char __user * src, unsigned long n)
334{
Heiko Carstensdab4079d2009-06-12 10:26:32 +0200335 might_fault();
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200336 return uaccess.strnlen_user(n, src);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337}
338
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
354
355/*
356 * Zero Userspace
357 */
358
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100359static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360__clear_user(void __user *to, unsigned long n)
361{
Gerald Schaeferd02765d2006-09-20 15:59:42 +0200362 return uaccess.clear_user(n, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363}
364
Heiko Carstensf7675ad2006-12-04 15:39:55 +0100365static inline unsigned long __must_check
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366clear_user(void __user *to, unsigned long n)
367{
Heiko Carstensdab4079d2009-06-12 10:26:32 +0200368 might_fault();
Heiko Carstensd12a2972013-02-21 16:57:42 +0100369 return uaccess.clear_user(n, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370}
371
/*
 * NOTE(review): judging by the names these copy to/from real (absolute)
 * storage, bypassing dynamic address translation — confirm against the
 * definitions in the s390 mm code.  Return semantics (int, presumably
 * 0 on success) are not visible in this header.
 */
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);
374
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375#endif /* __S390_UACCESS_H */