/*
 * arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
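
/*
 * A typical (illustrative) calling pattern for the two hooks above,
 * mirroring what the copy helpers further down in this file do:
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	... perform the user access ...
 *	uaccess_restore(__ua_flags);
 */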

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__(".syntax unified\n" \
		"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
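
/*
 * Informal sketch of the 33-bit check above: "adds" computes addr + size,
 * with any 32-bit overflow captured in the carry flag.  If there was no
 * overflow, "sbcscc" compares that sum against addr_limit, and "movcc"
 * zeroes the flag only when the sum does not exceed addr_limit.  A zero
 * result therefore means the whole [addr, addr + size) range lies below
 * the current address limit.
 */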

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
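
/*
 * For example (illustrative), __inttype(char) and __inttype(int) both
 * evaluate to unsigned long, while __inttype(long long) evaluates to
 * unsigned long long, so __get_user_check() below can hold any result
 * in a correctly sized __r2.
 */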

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size)			\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
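
/*
 * Rough description of the branchless check above: tmp is set to
 * addr_limit - 1 - ptr; if ptr is within the limit, the size is then
 * subtracted as well, and "movlo" replaces the pointer with NULL
 * whenever either subtraction borrows, i.e. whenever ptr or ptr + size
 * would fall beyond addr_limit.  Being branchless, the result cannot be
 * steered by branch misprediction, and the trailing csdb() stops the
 * unsanitised pointer from being used speculatively.
 */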

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
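
/*
 * Calling convention used by __get_user_x() (as encoded by the __asmeq
 * checks above): the user pointer is passed in r0 and the address limit
 * in r1; the out-of-line helper returns 0 or -EFAULT in r0 and the value
 * read in r2, clobbering only the registers named in __GUP_CLOBBER_*.
 * The helpers themselves are out-of-line assembly (e.g. in
 * arch/arm/lib/getuser.S).
 */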

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Store the result into the least significant word of a 64-bit target
 * variable; this differs only in the big-endian case, where the LSW of
 * the 64-bit __r2 register pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	   __asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	 })
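
/*
 * Illustrative use of get_user() (with hypothetical variables):
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 * On failure, val is zeroed rather than left holding stale kernel data.
 */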

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)		(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")
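
/*
 * How the fixup above works: the load at label 1 may fault; the
 * __ex_table entry maps 1b to 3b, so the fault handler resumes at 3,
 * which sets err to -EFAULT, zeroes the destination (so no stale kernel
 * data is leaked), and branches back to 2 to continue after the access.
 */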

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrh)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)
#endif


#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
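
/*
 * Illustrative use of put_user() (with a hypothetical pointer):
 *
 *	int __user *uptr;
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */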

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")
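
/*
 * This mirrors __get_user_asm() above, except that the fixup only needs
 * to set the error code: a faulted store leaves nothing to sanitise, so
 * there is no "mov %1, #0" equivalent.
 */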

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strh)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#endif /* !CONFIG_CPU_SPECTRE */

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}
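
/*
 * When CONFIG_UACCESS_WITH_MEMCPY is enabled, the out-of-line
 * arm_copy_to_user() implementation is expected to manage the user
 * access (domain/PAN) state itself, which is why no
 * uaccess_save_and_enable() pairing is done here in that configuration.
 */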

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */