/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })


#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)

#define set_fs(x) \
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

#define segment_eq(a,b) ((a).ar4 == (b).ar4)

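/*
 * Illustrative sketch (not part of this header, variables hypothetical):
 * the usual pattern for temporarily lifting the address-space check so a
 * kernel buffer can be handed to a routine that expects a __user pointer.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	rc = some_routine_expecting_user_ptr((char __user *)kbuf, len);
 *	set_fs(old_fs);
 */
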
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

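/*
 * Conceptually, one entry is emitted for each instruction that may fault
 * while touching user memory. A hypothetical 64-bit sketch of how such a
 * pair could be recorded from inline assembly (the real fixup code lives
 * in the uaccess implementations, not in this header):
 *
 *	"0:	lg	%0,0(%1)\n"		possibly faulting access
 *	"1:\n"					continuation / fixup label
 *	".section __ex_table,\"a\"\n"
 *	"	.quad	0b,1b\n"		insn = 0b, fixup = 1b
 *	".previous\n"
 *
 * On a fault at 0b the fault handler looks the address up in the table
 * and resumes execution at 1b.
 */
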
struct uaccess_ops {
	size_t (*copy_from_user)(size_t, const void __user *, void *);
	size_t (*copy_from_user_small)(size_t, const void __user *, void *);
	size_t (*copy_to_user)(size_t, void __user *, const void *);
	size_t (*copy_to_user_small)(size_t, void __user *, const void *);
	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
	size_t (*clear_user)(size_t, void __user *);
	size_t (*strnlen_user)(size_t, const char __user *);
	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
};

extern struct uaccess_ops uaccess;
extern struct uaccess_ops uaccess_std;
extern struct uaccess_ops uaccess_mvcos;
extern struct uaccess_ops uaccess_mvcos_switch;
extern struct uaccess_ops uaccess_pt;

extern int __handle_fault(unsigned long, unsigned long, int);

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = uaccess.copy_to_user_small(size, ptr, x);
	return size ? -EFAULT : size;
}

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = uaccess.copy_from_user_small(size, ptr, x);
	return size ? -EFAULT : size;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

extern int __get_user_bad(void) __attribute__((noreturn));

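/*
 * Illustrative usage sketch (hypothetical variables, not part of this
 * header): get_user()/put_user() transfer a single value and return 0 on
 * success or -EFAULT on a faulting access.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 *	val *= 2;
 *	if (put_user(val, (int __user *)arg))
 *		return -EFAULT;
 */
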
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 256))
		return uaccess.copy_to_user_small(n, to, from);
	else
		return uaccess.copy_to_user(n, to, from);
}
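
/*
 * Illustrative sketch (hypothetical variables): because __copy_to_user()
 * skips the access_ok() check, the caller performs it explicitly.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */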

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

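/*
 * Illustrative sketch (hypothetical struct and variables): the checked
 * variant is the usual choice in ioctl-style code; any non-zero return
 * means part of the copy was refused.
 *
 *	struct foo karg;
 *
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */
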
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 256))
		return uaccess.copy_from_user_small(n, from, to);
	else
		return uaccess.copy_from_user(n, from, to);
}

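/*
 * Illustrative sketch (hypothetical variables): as with __copy_to_user(),
 * the caller is responsible for the access_ok() check.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
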
extern void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		copy_from_user_overflow();
		return n;
	}
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

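/*
 * Illustrative sketch (hypothetical struct and variables): a typical
 * write()-style handler copying a request structure in from user space.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
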
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return uaccess.copy_in_user(n, to, from);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__access_ok(from,n) && __access_ok(to,n))
		n = __copy_in_user(to, from, n);
	return n;
}

/*
 * Copy a null terminated string from userspace.
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = -EFAULT;
	might_fault();
	if (access_ok(VERIFY_READ, src, 1))
		res = uaccess.strncpy_from_user(count, src, dst);
	return res;
}

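/*
 * Illustrative sketch (hypothetical variables): a negative return value
 * indicates a fault while reading the source string; the exact return
 * conventions are those of the selected uaccess_ops implementation.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return -EFAULT;
 */
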
static inline unsigned long
strnlen_user(const char __user * src, unsigned long n)
{
	might_fault();
	return uaccess.strnlen_user(n, src);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
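
/*
 * Illustrative sketch (hypothetical buffer): as documented above, the
 * returned size includes the terminating NUL and is 0 on exception.
 *
 *	unsigned long size = strnlen_user(uname, sizeof(name));
 *
 *	if (size == 0 || size > sizeof(name))
 *		return -EINVAL;
 */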

/*
 * Zero Userspace
 */

static inline unsigned long __must_check
__clear_user(void __user *to, unsigned long n)
{
	return uaccess.clear_user(n, to);
}

static inline unsigned long __must_check
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		n = uaccess.clear_user(n, to);
	return n;
}

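/*
 * Illustrative sketch (hypothetical variables): clear_user() returns the
 * number of bytes that could not be zeroed, so 0 means complete success.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
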
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#endif /* __S390_UACCESS_H */