/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE

extern int fixup_exception(struct pt_regs *regs);
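
/*
 * Illustrative sketch, not part of the original header: with
 * ARCH_HAS_RELATIVE_EXTABLE each field holds an offset relative to its own
 * address, so the absolute addresses would be recovered along these lines
 * (ex_to_insn/ex_to_fixup are hypothetical helper names):
 *
 *	static inline unsigned long ex_to_insn(const struct exception_table_entry *e)
 *	{
 *		return (unsigned long)&e->insn + e->insn;
 *	}
 *
 *	static inline unsigned long ex_to_fixup(const struct exception_table_entry *e)
 *	{
 *		return (unsigned long)&e->fixup + e->fixup;
 *	}
 *
 * Relative entries keep the table position-independent and half the size of
 * one storing two absolute 64-bit addresses.
 */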
62
Catalin Marinas0aea86a2012-03-05 11:49:32 +000063#define get_ds() (KERNEL_DS)
Catalin Marinas0aea86a2012-03-05 11:49:32 +000064#define get_fs() (current_thread_info()->addr_limit)
65
66static inline void set_fs(mm_segment_t fs)
67{
68 current_thread_info()->addr_limit = fs;
James Morse57f49592016-02-05 14:58:48 +000069
70 /*
Mark Rutland346edd62018-04-12 12:11:03 +010071 * Prevent a mispredicted conditional call to set_fs from forwarding
72 * the wrong address limit to access_ok under speculation.
73 */
74 dsb(nsh);
75 isb();
76
77 /*
James Morse57f49592016-02-05 14:58:48 +000078 * Enable/disable UAO so that copy_to_user() etc can access
79 * kernel memory with the unprivileged instructions.
80 */
81 if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
82 asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
83 else
84 asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
85 CONFIG_ARM64_UAO));
Catalin Marinas0aea86a2012-03-05 11:49:32 +000086}
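
/*
 * Usage sketch, not part of the original header: the classic pattern for
 * temporarily widening the address limit so that kernel buffers can be
 * passed to routines expecting __user pointers. Only get_fs()/set_fs() and
 * KERNEL_DS from this file are assumed.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... uaccess routines may now be used on kernel addresses ...
 *	set_fs(old_fs);
 */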

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %0, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");

	return addr;
}
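
/*
 * Reference model, illustrative only: the 65-bit comparison documented above
 * can be written portably with 128-bit arithmetic (range_ok_model is a
 * hypothetical name; unsigned __int128 is a GCC extension). The asm version
 * above computes the same result without a branch.
 *
 *	static inline unsigned long range_ok_model(unsigned long addr,
 *						   unsigned long size,
 *						   unsigned long limit)
 *	{
 *		return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
 *	}
 */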
Catalin Marinas0aea86a2012-03-05 11:49:32 +0000121
Andre Przywara87261d12016-10-19 14:40:54 +0100122/*
Kristina Martsenko1d61ccb2017-06-06 20:14:09 +0100123 * When dealing with data aborts, watchpoints, or instruction traps we may end
124 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
125 * pass on to access_ok(), for instance.
Andre Przywara87261d12016-10-19 14:40:54 +0100126 */
127#define untagged_addr(addr) sign_extend64(addr, 55)
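
/*
 * Example, illustrative only: for a user pointer carrying a 0x5a tag in the
 * top byte, bit 55 is clear and sign-extending from it drops the tag:
 *
 *	untagged_addr(0x5a00aaaacafe0000UL) == 0x0000aaaacafe0000UL
 *
 * For a kernel (TTBR1) address bit 55 is set, so the top byte is restored
 * to all-ones instead.
 */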

#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
#define user_addr_max			get_fs

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
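
/*
 * Rough C equivalent of the sequence above, illustrative only and assuming
 * addr_limit is an inclusive limit of the form 2^n - 1 (or ~0UL for
 * KERNEL_DS):
 *
 *	if ((unsigned long)ptr & ~limit)	// bics xzr, ptr, limit
 *		safe_ptr = NULL;		// csel: take xzr when bits
 *	else					//       above the limit are set
 *		safe_ptr = (void __user *)ptr;
 *
 * The csdb() barrier then limits how far a speculatively generated value
 * can be consumed before the csel has resolved.
 */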

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __get_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {		\
		__p = uaccess_mask_ptr(__p);				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_unaligned __get_user

#define get_user	__get_user

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {		\
		__p = uaccess_mask_ptr(__p);				\
		__put_user_err((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_check((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_unaligned __put_user

#define put_user	__put_user
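
/*
 * Usage sketch, not part of the original header: typical use of the checked
 * accessors defined above. do_example() and its argument are hypothetical.
 *
 *	static int do_example(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))	// performs access_ok() itself
 *			return -EFAULT;
 *
 *		val++;
 *
 *		if (put_user(val, uptr))
 *			return -EFAULT;
 *
 *		return 0;
 *	}
 */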

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return __arch_copy_from_user(to, __uaccess_mask_ptr(from), n);
}

static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return __arch_copy_to_user(__uaccess_mask_ptr(to), from, n);
}

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	kasan_check_write(to, n);

	if (access_ok(VERIFY_READ, from, n)) {
		check_object_size(to, n, false);
		res = __arch_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);

	if (access_ok(VERIFY_WRITE, to, n)) {
		check_object_size(from, n, true);
		n = __arch_copy_to_user(to, from, n);
	}
	return n;
}
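
/*
 * Usage sketch, not part of the original header: copying a structure in from
 * user space and back out with the checking routines above. struct foo and
 * do_copy_example() are hypothetical.
 *
 *	static int do_copy_example(struct foo __user *uptr)
 *	{
 *		struct foo tmp;
 *
 *		if (copy_from_user(&tmp, uptr, sizeof(tmp)))
 *			return -EFAULT;
 *
 *		// ... operate on tmp ...
 *
 *		if (copy_to_user(uptr, &tmp, sizeof(tmp)))
 *			return -EFAULT;
 *
 *		return 0;
 *	}
 *
 * As implemented above, copy_from_user() zero-fills the destination tail on
 * a partial copy, so the uncopied part of tmp is zeroed rather than left
 * uninitialised.
 */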

static inline unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __arch_copy_in_user(__uaccess_mask_ptr(to), __uaccess_mask_ptr(from), n);
	return n;
}
#define copy_in_user __copy_in_user

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASM_UACCESS_H */