/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;

	might_fault();
	/*
	 * No memory constraint: the asm only writes to user memory,
	 * which gcc knows nothing about.
	 */
	stac();
	asm volatile(
		/* Zero whole quadwords first (%[size8] = size / 8). */
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		/* Then zero the remaining tail bytes (%[size1] = size & 7). */
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		/* Fault fixup: compute how many bytes were left unzeroed. */
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;	/* number of bytes that could NOT be cleared */
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

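/*
 * Illustrative usage (a sketch, not part of this file): callers treat
 * a non-zero return value as the number of bytes that could not be
 * zeroed, e.g.:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 * __clear_user() skips the access_ok() check and is only for ranges
 * the caller has already validated.
 */
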
/*
 * Try to copy the last bytes one at a time and report how many were
 * left uncopied. Since a protection fault in copy_from/to_user is not
 * a normal situation, it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++) {
		char c;

		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}
	/* The asm caller may have entered with AC set; make sure it is clear. */
	clac();
	return len;
}

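/*
 * Illustrative caller contract (a sketch, not from this file): the
 * fixup paths of the asm copy routines branch here after a fault, and
 * the return value becomes the uncopied byte count that e.g.
 * copy_from_user() hands back to its caller:
 *
 *	if (copy_from_user(dst, usrc, len))
 *		return -EFAULT;	// some trailing bytes were not copied
 */
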
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

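/*
 * Worked example (illustrative, assuming a 64-byte cache line):
 * clean_cache_range() for addr 0x1038 and size 8 masks the start down
 * to 0x1000 and issues a single clwb(), since bytes 0x1038..0x103f
 * all sit within that one line.
 */
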
void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, 1);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

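/*
 * Worked example (illustrative, assuming a 64-byte cache line): for
 * dst = 0x1004 and size = 98, the unaligned head triggers an explicit
 * clean_cache_range(dst, 1); dest then rounds up to 0x1040, so 60
 * bytes count as flushed, and because the remaining 38 bytes are not
 * a multiple of 8 the line containing the last byte is written back
 * as well.
 */
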
void memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);

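/*
 * Usage sketch (illustrative, not from this file): pmem-style callers
 * copy into a persistent-memory mapping and then issue a store fence
 * so the non-temporal stores are globally ordered, e.g.:
 *
 *	memcpy_flushcache(pmem_addr, buf, len);
 *	wmb();	// sfence: order the movnti stores
 */
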
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			    size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif