/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;

	might_fault();
	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	stac();
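	/*
	 * Zero size/8 qwords, then the remaining size & 7 bytes.  On a
	 * fault the fixup at label 3 recomputes how many bytes were left
	 * unwritten, so the value returned below is the number of bytes
	 * that could not be cleared (0 on success).
	 */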
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz 2f\n"
		"1:	movb %b[zero],(%[dst])\n"
		"	incq %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

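/**
 * clear_user - zero a block of memory in user space
 * @to: destination address in user space
 * @n: number of bytes to zero
 *
 * Returns the number of bytes that could not be cleared (0 on success).
 */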
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

/*
 * Try to copy the last bytes one at a time and return how many are left.
 * Since a protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
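	/*
	 * The fixup paths of the assembly copy_user_* routines jump here
	 * with STAC still in effect, so close the user-space access window
	 * before returning.
	 */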
	clac();
	return len;
}

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

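	/*
	 * Round the start address down to a cache line boundary so a
	 * partial first line is written back as well; clwb() always covers
	 * the whole line containing p, so vend needs no rounding.
	 */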
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

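/*
 * Write back, without invalidating, the CPU cache lines covering a
 * persistent memory range so that previous stores reach the media.
 */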
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
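	/*
	 * clean_cache_range() rounds to whole cache lines, so flushing a
	 * single byte below is enough to write back the line that the
	 * cached head or tail of the copy landed in.
	 */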
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, 1);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

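		/*
		 * Bytes up to the cache-line-aligned dest were covered by
		 * the flush above; if the remainder is not a multiple of 8,
		 * the copy used cached stores for the tail, so write back
		 * the line holding the last byte as well.
		 */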
		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

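	/*
	 * movnti performs a non-temporal (write-combining) store that
	 * bypasses the cache hierarchy, so the bulk of the copy below
	 * needs no explicit write-back of the destination.
	 */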
	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);

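/*
 * Map the page and stream its contents through memcpy_flushcache().  On
 * x86_64 there is no highmem, so kmap_atomic() essentially resolves to
 * page_address() with page faults and preemption disabled.
 */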
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif