#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/preempt.h>
#include <asm/uaccess.h>

/*
 * These routines enable/disable the pagefault handler: while pagefaults
 * are disabled, the handler will not take any locks and will go straight
 * to the fixup table.
 *
 * They closely resemble the preempt_disable/enable calls and, in fact,
 * they are currently identical; this is because there is no other way
 * to make the pagefault handlers do this.  So we do disable preemption,
 * even though we don't strictly need to.
 */
static inline void pagefault_disable(void)
{
	preempt_count_inc();
	/*
	 * Make sure the store has been issued before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	preempt_count_dec();
	preempt_check_resched();
}
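
/*
 * Example (a minimal, hypothetical caller -- not part of this header):
 * copying from user space in a context that must not sleep.  With
 * pagefaults disabled, a faulting copy returns the number of bytes it
 * could not transfer instead of sleeping in the fault handler; dst,
 * usrc and len are assumed to be provided by the caller:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, usrc, len);
 *	pagefault_enable();
 *	if (left)
 *		... fall back to a path that may take the fault ...
 */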

#ifndef ARCH_HAS_NOCACHE_UACCESS
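
/*
 * An architecture that has user-copy primitives which avoid polluting
 * the CPU cache (hence "nocache") defines ARCH_HAS_NOCACHE_UACCESS and
 * supplies its own implementations; everywhere else the _nocache
 * variants below are simply the ordinary, cached copies.
 */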

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from - its type is typeof(retval) *
 * @retval: read into this variable
 *
 * Safely read from address @addr into variable @retval.  If a kernel
 * fault happens, handle that and return a non-zero value.
 * We ensure that the copy is executed in atomic context, so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_address() suitable for use within regions where the
 * caller already holds mmap_sem, or other locks which nest inside
 * mmap_sem.
 * This must be a macro because the cast and the sizeof() need to know
 * the type of @retval.
 */
#define probe_kernel_address(addr, retval)				\
	({								\
		long ret;						\
		mm_segment_t old_fs = get_fs();				\
									\
		set_fs(KERNEL_DS);					\
		pagefault_disable();					\
		ret = __copy_from_user_inatomic(&(retval),		\
			(__force typeof(retval) __user *)(addr),	\
			sizeof(retval));				\
		pagefault_enable();					\
		set_fs(old_fs);						\
		ret;							\
	})
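
/*
 * Example (a hypothetical caller): peeking at a word that may be
 * unmapped, e.g. while printing diagnostics with locks held; ptr is
 * an assumed caller-provided pointer:
 *
 *	unsigned long word;
 *
 *	if (probe_kernel_address(ptr, word))
 *		pr_cont("(fault)");
 *	else
 *		pr_cont("%lx", word);
 */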

/**
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel
 * fault happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
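
/*
 * Example (a hypothetical caller): taking a snapshot of a structure
 * whose mapping is not guaranteed to still exist; "struct foo" and
 * ptr are stand-ins for the caller's own type and data:
 *
 *	struct foo snapshot;
 *
 *	if (probe_kernel_read(&snapshot, ptr, sizeof(snapshot)))
 *		return -EFAULT;
 */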

/**
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel
 * fault happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
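
/*
 * Example (a hypothetical caller): storing a word at an address that
 * may be bad; the fault is caught and reported as -EFAULT instead of
 * oopsing.  dst is an assumed caller-provided address:
 *
 *	long val = 1;
 *
 *	if (probe_kernel_write(dst, &val, sizeof(val)))
 *		return -EFAULT;
 */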

#endif /* __LINUX_UACCESS_H__ */