/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}
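
/*
 * Illustrative example (not part of the original source): with
 * KASAN_SHADOW_SCALE_SIZE == 8, unpoisoning 13 bytes clears one full
 * shadow byte (bytes 0..7 accessible) and writes 13 & KASAN_SHADOW_MASK
 * == 5 into the next shadow byte, so only the first 5 of the 8 bytes
 * that byte covers are accessible.
 */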

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        __kasan_unpoison_stack(current, watermark);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}
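
/*
 * Illustrative example (not part of the original source): a shadow value
 * of 5 means only the first 5 bytes of the 8-byte granule are valid.
 * Accessing in-granule offset 4 gives 4 >= 5 == false (OK), while offset
 * 5 gives 5 >= 5 == true (report). Negative shadow values mark redzones
 * and freed memory, so any in-granule offset compares as poisoned.
 */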

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}
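
/*
 * Illustrative note (not part of the original source): the scan runs
 * byte-wise over an unaligned prefix, then in 8-byte words, then over the
 * tail. E.g. with start % 8 == 5, the first 3 bytes are checked
 * individually before switching to whole words; the return value is the
 * address of the first nonzero (poisoned) shadow byte, or 0 if clean.
 */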

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);
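
/*
 * Illustrative usage (hypothetical caller, not part of this file): code
 * that the compiler does not instrument can validate accesses explicitly
 * before touching memory:
 *
 *        kasan_check_read(src, len);
 *        kasan_check_write(dst, len);
 *        do_raw_copy(dst, src, len);
 *
 * where do_raw_copy() stands in for any uninstrumented memory helper.
 */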

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}
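
/*
 * Illustrative example (not part of the original source): a 100-byte
 * object misses the 64 - 16 and 128 - 32 brackets but fits 512 - 64, so
 * it gets a 64-byte redzone; a 5000-byte object gets 256 bytes.
 */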

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        int orig_size = *size;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);

        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
            *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}
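
/*
 * Illustrative layout (not part of the original source; metadata sizes
 * are assumptions): for a 64-byte object whose kasan_alloc_meta is, say,
 * 16 bytes, the object occupies bytes 0..63, the alloc metadata starts at
 * offset 64, and the final size is padded so that at least
 * optimal_redzone(64) == 32 bytes of redzone follow the object.
 */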

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
        s8 shadow_byte;

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
                kasan_report_double_free(cache, object,
                                __builtin_return_address(1));
                return true;
        }

        kasan_poison_slab_free(cache, object);

        if (unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);
        return true;
}
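
/*
 * Illustrative note (not part of the original source): a live object's
 * first shadow byte is 0 (granule fully accessible) or 1..7 (partially
 * accessible). Any other value means the object is already poisoned,
 * e.g. an earlier kasan_slab_free() marked it KASAN_KMALLOC_FREE, so a
 * repeated free is reported as a double-free.
 */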

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
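
/*
 * Illustrative example (not part of the original source): kmalloc(100)
 * served from a 128-byte cache unpoisons bytes 0..99; the shadow byte for
 * the granule covering bytes 96..103 becomes 4 (100 & KASAN_SHADOW_MASK),
 * so accesses to bytes 100..103 are still caught, and bytes 104..127 are
 * poisoned with KASAN_KMALLOC_REDZONE.
 */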

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}
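
/*
 * Illustrative example (not part of the original source): with
 * KASAN_SHADOW_SCALE_SHIFT == 3, a 1 MB module mapping needs
 * 1 MB >> 3 == 128 KB of shadow, rounded up to whole pages and mapped at
 * the fixed address kasan_mem_to_shadow(addr), so the usual
 * address-to-shadow translation also works for module memory.
 */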

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
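
/*
 * Illustrative note (not part of the original source): the compiler turns
 * every instrumented access into one of the calls above, so a plain
 * "*(u32 *)p = x;" compiles to roughly:
 *
 *        __asan_store4((unsigned long)p);
 *        *(u32 *)p = x;
 *
 * (or the _noabort alias, depending on compiler flags).
 */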

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
        /*
         * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
         * by redzones, so we simply round up size to simplify logic.
         */
        kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
        kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
        size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
                        rounded_up_size;
        size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

        const void *left_redzone = (const void *)(addr -
                        KASAN_ALLOCA_REDZONE_SIZE);
        const void *right_redzone = (const void *)(addr + rounded_up_size);

        WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

        kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
                              size - rounded_down_size);
        kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
                        KASAN_ALLOCA_LEFT);
        kasan_poison_shadow(right_redzone,
                        padding_size + KASAN_ALLOCA_REDZONE_SIZE,
                        KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
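
/*
 * Illustrative layout (not part of the original source), assuming
 * KASAN_ALLOCA_REDZONE_SIZE == 32: for alloca(40) at an address A, bytes
 * A-32..A-1 become the left redzone, A..A+39 stay accessible, and
 * A+40..A+95 (24 bytes of padding up to round_up(40, 32) plus the
 * 32-byte right redzone) are poisoned as KASAN_ALLOCA_RIGHT.
 */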

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
        if (unlikely(!stack_top || stack_top > stack_bottom))
                return;

        kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)                                    \
        void __asan_set_shadow_##byte(const void *addr, size_t size)    \
        {                                                               \
                __memset((void *)addr, 0x##byte, size);                 \
        }                                                               \
        EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
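
/*
 * Illustrative note (not part of the original source): these values
 * follow the AddressSanitizer stack shadow encoding, where 0x00 marks
 * memory as addressable and 0xf1/0xf2/0xf3 mark the left/middle/right
 * stack redzones; 0xf8 is the use-after-scope marker. The meaning of
 * each magic byte is fixed by the compiler ABI.
 */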

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_info("WARNING: KASAN doesn't support memory hot-add\n");
        pr_info("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif