/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE
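
/*
 * Illustrative sketch: with relative offsets, the absolute addresses are
 * recovered roughly as
 *
 *	insn_addr  = (unsigned long)&entry->insn  + entry->insn;
 *	fixup_addr = (unsigned long)&entry->fixup + entry->fixup;
 *
 * which keeps each entry at 8 bytes and independent of the kernel's
 * load address.
 */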

extern int fixup_exception(struct pt_regs *regs);

#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
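
/*
 * A typical (illustrative) caller pattern for temporarily lifting the
 * limit so the uaccess routines below can operate on a kernel pointer:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...call routines that expect a __user pointer...
 *	set_fs(old_fs);
 */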

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"	\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
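
/*
 * For illustration, the asm sequence above computes roughly:
 *
 *	sum  = (u64)(addr) + (u64)(size);
 *	flag = (sum >= (u64)(addr)) && (sum <= addr_limit);
 *
 * with the carry out of "adds" standing in for the 65th bit, so a
 * wrapping addition always fails the check.
 */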

/*
 * When dealing with data aborts or instruction traps we may end up with
 * a tagged userland pointer. Clear the tag to get a sane pointer to pass
 * on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)
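
/*
 * For example (illustrative value), a user pointer carrying tag 0x5a:
 *
 *	untagged_addr(0x5a00ffffdeadbeefUL) == 0x0000ffffdeadbeefUL
 *
 * Sign-extending from bit 55 clears the tag byte of a TTBR0 address.
 */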

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#define __uaccess_disable(alt)						\
do {									\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,			\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,			\
			CONFIG_ARM64_PAN));				\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
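
/*
 * Illustrative pairing, mirroring __get_user_err()/__put_user_err()
 * below: unprivileged accesses are bracketed so that PAN is only
 * cleared around the access itself:
 *
 *	uaccess_enable_not_uao();
 *	...load/store on the __user pointer...
 *	uaccess_disable_not_uao();
 */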

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})
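
/*
 * Illustrative usage (hypothetical caller): get_user() performs the
 * access_ok() check itself, whereas __get_user() relies on the caller
 * having validated the pointer beforehand:
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */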

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
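
/*
 * Illustrative counterpart for stores (hypothetical caller):
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * Unlike get_user(), a failing put_user() has no destination to zero.
 */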

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	kasan_check_write(to, n);

	if (access_ok(VERIFY_READ, from, n)) {
		check_object_size(to, n, false);
		res = __arch_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
}
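
/*
 * Illustrative usage (hypothetical caller): the copy routines return the
 * number of bytes that could NOT be copied, so zero means success:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */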

static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
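
/*
 * Illustrative usage (hypothetical caller): strncpy_from_user() returns
 * the length of the copied string (excluding the trailing NUL) on
 * success, or -EFAULT if user memory could not be accessed:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */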

#else	/* __ASSEMBLY__ */

#include <asm/assembler.h>

/*
 * User access enabling/disabling macros. These are no-ops when UAO is
 * present.
 */
	.macro	uaccess_disable_not_uao, tmp1
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(1)
alternative_else_nop_endif
	.endm

	.macro	uaccess_enable_not_uao, tmp1, tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(0)
alternative_else_nop_endif
	.endm

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_UACCESS_H */