/*
 * User address space access functions.
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/export.h>

#include <asm/word-at-a-time.h>
#include <linux/sched.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        if (__range_not_ok(from, n, TASK_SIZE))
                return n;

        /*
         * Even though this function is typically called from NMI/IRQ context,
         * disable pagefaults so that its behaviour is consistent even when
         * called from other contexts.
         */
        pagefault_disable();
        ret = __copy_from_user_inatomic(to, from, n);
        pagefault_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
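
/*
 * Illustrative sketch only: the struct and helper below are hypothetical
 * and not used anywhere in the kernel.  They show the calling convention
 * of copy_from_user_nmi() as used by perf-style callers that walk a user
 * stack from NMI context: a non-zero return means the frame could not be
 * (fully) copied and the unwind should stop.
 */
struct example_user_frame {
        unsigned long next_fp;
        unsigned long return_address;
};

static inline bool example_read_user_frame(const void __user *fp,
                                           struct example_user_frame *frame)
{
        /* copy_from_user_nmi() returns the number of bytes NOT copied. */
        return copy_from_user_nmi(frame, fp, sizeof(*frame)) == 0;
}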

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}
EXPORT_SYMBOL(_copy_to_user);
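
/*
 * Illustrative sketch only: the struct and function below are hypothetical.
 * Most callers use the copy_to_user() wrapper rather than _copy_to_user()
 * directly, and convert a non-zero return value into -EFAULT, e.g. when
 * handing a result structure back from an ioctl handler.
 */
struct example_result {
        unsigned int status;
        unsigned int value;
};

static inline int example_report_result(void __user *ubuf,
                                        const struct example_result *res)
{
        if (_copy_to_user(ubuf, res, sizeof(*res)))
                return -EFAULT; /* some bytes were not copied */
        return 0;
}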

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
{
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}
EXPORT_SYMBOL(_copy_from_user);
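
/*
 * Illustrative sketch only: the struct and function below are hypothetical.
 * The usual pattern for callers of the copy_from_user() wrapper is to treat
 * any non-zero return as -EFAULT; on such a failure the destination buffer
 * has already been zero-padded by _copy_from_user() above.
 */
struct example_request {
        unsigned int cmd;
        unsigned int arg;
};

static inline int example_fetch_request(struct example_request *req,
                                        const void __user *ubuf)
{
        if (_copy_from_user(req, ubuf, sizeof(*req)))
                return -EFAULT;
        return 0;
}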