Hiro Yoshioka | c22ce14 | 2006-06-23 02:04:16 -0700 | [diff] [blame] | 1 | #ifndef __LINUX_UACCESS_H__ |
| 2 | #define __LINUX_UACCESS_H__ |
| 3 | |
| 4 | #include <asm/uaccess.h> |
| 5 | |
| 6 | #ifndef ARCH_HAS_NOCACHE_UACCESS |
| 7 | |
| 8 | static inline unsigned long __copy_from_user_inatomic_nocache(void *to, |
| 9 | const void __user *from, unsigned long n) |
| 10 | { |
| 11 | return __copy_from_user_inatomic(to, from, n); |
| 12 | } |
| 13 | |
| 14 | static inline unsigned long __copy_from_user_nocache(void *to, |
| 15 | const void __user *from, unsigned long n) |
| 16 | { |
| 17 | return __copy_from_user(to, from, n); |
| 18 | } |
| 19 | |
| 20 | #endif /* ARCH_HAS_NOCACHE_UACCESS */ |
| 21 | |
/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from - its type is typeof(retval)*
 * @retval: read into this variable
 *
 * Safely read from address @addr into variable @retval.  If a kernel fault
 * happens, handle that and return -EFAULT; returns 0 on success.
 * We ensure that the __get_user() is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_address() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 *
 * The internal temporary is deliberately NOT named "ret": a caller
 * passing a variable called "ret" as @retval would otherwise have
 * __get_user() write into the macro's shadowing local instead of the
 * caller's variable.
 */
#define probe_kernel_address(addr, retval)		\
	({						\
		long _pka_ret;				\
							\
		inc_preempt_count();			\
		_pka_ret = __get_user(retval, addr);	\
		dec_preempt_count();			\
		_pka_ret;				\
	})
| 43 | |
Hiro Yoshioka | c22ce14 | 2006-06-23 02:04:16 -0700 | [diff] [blame] | 44 | #endif /* __LINUX_UACCESS_H__ */ |