/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies
 * and making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst = 0;
static volatile size_t cache_size = 1024;
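/*
 * For example, a copy with a compile-time-constant length, such as
 * copy_to_user(user_ptr, buf, 32), is typically validated at compile
 * time and skips the runtime __check_object_size() path these tests
 * are meant to exercise; hence the volatile "unconst" above.
 */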
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

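/*
 * Fills a local buffer and returns a pointer to it; by the time the
 * caller dereferences the result, this frame is dead, so the pointer
 * refers to stack memory outside any live frame. On arches that
 * provide arch_within_stack_frames(), hardened usercopy should refuse
 * copies through it.
 */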
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

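/*
 * Copies between a one-page userspace mapping and either a valid
 * local stack buffer or a "bad" stack pointer: with bad_frame, a
 * pointer into a dead callee frame; otherwise a pointer to the last
 * word of the thread stack, so that a sizeof(good_stack) copy runs
 * past the end of the stack.
 */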
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/*
		 * Put the start address just inside the top of the
		 * stack, so the copy below runs off the end of it.
		 */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to run the beyond-stack case
		 * here: if usercopy failed to protect us, the
		 * copy_from_user() would scribble past the end of our
		 * stack, potentially into another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

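/*
 * Copies between a one-page userspace mapping and a kmalloc() buffer,
 * first with the allocated size (expected to succeed), then with
 * twice that size, which the heap object-size check should catch as
 * spilling out of the allocation.
 */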
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user((void __user *)user_addr, one, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(one, (void __user *)user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

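/*
 * Copies between userspace and two heap objects: one from kmalloc()
 * (whose caches carry the SLAB_USERCOPY flag) and one from
 * "bad_cache", created without it. On kernels whose hardened
 * usercopy honors the SLAB_USERCOPY whitelist, the bad_cache copies
 * should Oops.
 */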
static void do_usercopy_heap_flag(bool to_user)
{
	unsigned long user_addr;
	unsigned char *good_buf = NULL;
	unsigned char *bad_buf = NULL;

	/* Make sure cache was prepared. */
	if (!bad_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate one buffer from each cache (kmalloc will have the
	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
	 */
	good_buf = kmalloc(cache_size, GFP_KERNEL);
	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
	if (!good_buf || !bad_buf) {
		pr_warn("Failed to allocate buffers from caches\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}

	memset(good_buf, 'A', cache_size);
	memset(bad_buf, 'B', cache_size);

	if (to_user) {
		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, good_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, bad_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
		if (copy_from_user(good_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
		if (copy_from_user(bad_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
	if (bad_buf)
		kmem_cache_free(bad_cache, bad_buf);
	kfree(good_buf);
}

/* Callable tests. */
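/*
 * Each test is reachable through lkdtm's debugfs interface, e.g.:
 *
 *	echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * The "bad" half of each pair is expected to trigger an Oops from the
 * hardened usercopy checks.
 */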
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
	do_usercopy_heap_flag(true);
}

void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
	do_usercopy_heap_flag(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

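/*
 * Copies kernel data to userspace: first from rodata (test_text),
 * which should succeed, then a page's worth starting at vm_mmap()'s
 * code, which hardened usercopy should reject as a copy from kernel
 * text.
 */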
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
				      0, NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(bad_cache);
}