/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

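/*
 * Marks 'size' bytes starting at 'address' as accessible. KASAN tracks
 * memory in 8-byte (KASAN_SHADOW_SCALE_SIZE) granules: a shadow byte of
 * 0 means the whole granule is accessible, a value of 1..7 means only
 * that many leading bytes are. For a size that is not a multiple of 8,
 * the final shadow byte therefore encodes the number of accessible
 * bytes in the last granule (the address-to-shadow mapping itself is
 * done by kasan_mem_to_shadow()).
 */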
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	__kasan_unpoison_stack(current, watermark);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm
 * function returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

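/*
 * Shadow semantics for the checks below: 0 means all 8 bytes of the
 * granule are accessible, 1..7 means only the first N bytes are, and a
 * negative value poisons the whole granule. E.g. a 1-byte access whose
 * offset in the granule (addr & KASAN_SHADOW_MASK) is 5 is valid iff
 * the shadow byte is 0 or greater than 5.
 */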
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

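/*
 * The 2-, 4- and 8-byte checks fetch two shadow bytes at once. If both
 * are zero the access cannot touch poisoned memory and the fast path
 * returns immediately; otherwise the last byte of the access is checked
 * precisely, and the first shadow byte is consulted only when the
 * access straddles a granule boundary.
 */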
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

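/*
 * Helpers for arbitrarily sized accesses: scan the shadow region for a
 * non-zero byte, word-at-a-time after aligning the start and byte-wise
 * for the unaligned prefix and suffix. Both return the address of the
 * first non-zero shadow byte, or 0 if the whole range is clean.
 */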
static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

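/*
 * Common entry point for all the hooks below: report an invalid access
 * if the address lies below the shadow-covered range or if any shadow
 * byte for [addr, addr + size) indicates poison. For compile-time
 * constant sizes the compiler resolves the switch in
 * memory_is_poisoned(), leaving only the specialized fast path inlined.
 */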
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

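/*
 * Interceptors for the string functions: the #undefs drop any
 * architecture-provided macro versions, so every memset/memmove/memcpy
 * in an instrumented kernel is checked here before being forwarded to
 * the real __mem*() implementation.
 */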
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * Larger redzones are used for larger allocations.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

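/*
 * Grow each object's allocation to make room for KASAN metadata, giving
 * the slab layout:
 *
 *	| object | alloc meta | free meta (optional) | redzone |
 *
 * Free metadata is appended only when it cannot live inside the freed
 * object itself: for SLAB_DESTROY_BY_RCU caches, caches with
 * constructors, and objects smaller than struct kasan_free_meta.
 */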
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

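/*
 * Capture and deduplicate the current stack trace. Frames below the
 * irq entry point are cut off by filter_irq_stacks() so that traces
 * taken in interrupt context do not differ only in the interrupted
 * frames, and the trailing ULONG_MAX terminator that save_stack_trace()
 * may append is dropped before the trace is interned in the stack
 * depot.
 */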
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

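/*
 * On free the object is poisoned and put into the quarantine rather
 * than returned to the allocator straight away, so use-after-free
 * accesses keep hitting poisoned shadow for as long as possible. A
 * shadow byte that already marks the object as freed or otherwise
 * inaccessible at this point indicates a double-free or an invalid
 * free.
 */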
bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

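/*
 * Unpoison the 'size' bytes actually requested and poison the rest of
 * the slab object, so that accesses beyond the requested size are
 * caught even though the underlying object is larger:
 *
 *	| size bytes, accessible | redzone up to object_size |
 *
 * Redzone boundaries are rounded to KASAN_SHADOW_SCALE_SIZE because
 * shadow bytes describe whole 8-byte granules.
 */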
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

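/*
 * Back a module's address range with shadow memory. Shadow covers
 * memory at a 1:8 ratio, so size >> KASAN_SHADOW_SCALE_SHIFT bytes of
 * shadow (rounded up to whole pages) are vmalloc'ed at the address that
 * the shadow mapping dictates for 'addr'. The VM_KASAN flag set on the
 * original area ensures the shadow is freed along with it in
 * kasan_free_shadow().
 */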
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

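/*
 * The compiler emits a constructor that calls __asan_register_globals()
 * with a descriptor for each instrumented global variable. Every global
 * is unpoisoned up to its real size, and the tail of its redzone-padded
 * allocation is poisoned with KASAN_GLOBAL_REDZONE.
 */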
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

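/*
 * Out-of-line instrumentation hooks: the compiler inserts a call to
 * __asan_loadX()/__asan_storeX() (or their _noabort variants) before
 * every X-byte memory access it instruments. In the kernel the _noabort
 * versions are plain aliases, since a KASAN report never aborts
 * execution anyway.
 */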
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

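/*
 * Memory hot-add is refused because shadow memory is never allocated
 * for hot-plugged ranges, so any access to them would itself be a wild
 * shadow access.
 */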
#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif