/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

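/*
 * Shadow encoding, for reference (a summary of the scheme the two
 * helpers above implement, not new behavior): each shadow byte tracks
 * one KASAN_SHADOW_SCALE_SIZE (8) byte granule. 0 means all 8 bytes
 * are addressable, 1..7 means only the first N bytes are addressable,
 * and negative values are poison markers such as KASAN_FREE_PAGE or
 * KASAN_KMALLOC_REDZONE. E.g. kasan_unpoison_shadow(p, 13) on an
 * aligned p writes two shadow bytes: 0 for the first granule and
 * 13 & KASAN_SHADOW_MASK == 5 for the partially addressable one.
 */
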
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

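/*
 * Worked example for the check above (illustrative only): a shadow
 * value of 5 means bytes 0..4 of the granule are addressable. For a
 * 1-byte access, addr & KASAN_SHADOW_MASK gives the byte's offset
 * within its granule; offset 3 < 5 is valid, while offset 6 >= 5 makes
 * the function return true and the caller reports the bug.
 */
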
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * The access crosses an 8-byte (shadow granule) boundary. Such an
	 * access maps into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

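/*
 * Example of the boundary test above (illustrative only): for a 4-byte
 * access at an addr ending in 0x6, the last byte ends in 0x9, so
 * (addr + size - 1) & KASAN_SHADOW_MASK == 1, and 1 < 3 means the
 * access spans two granules; both the first shadow byte and the shadow
 * byte covering the last accessed byte must then be checked.
 */
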
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

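/*
 * Note on the dispatch above (for reference): the compiler-emitted
 * __asan_loadX/__asan_storeX callbacks defined later in this file pass
 * a literal size, so __builtin_constant_p(size) is true there and each
 * callback compiles down to one of the specialized checks. Only
 * __asan_loadN/__asan_storeN and the mem*() interceptors, where size
 * is a runtime value, take the generic memory_is_poisoned_n() path.
 */
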
static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

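/*
 * Usage sketch (illustrative, not part of this file): these exported
 * hooks let code the compiler does not instrument perform the same
 * checks explicitly before touching a buffer, e.g.:
 *
 *	kasan_check_write(to, n);	// about to write n bytes to 'to'
 *	kasan_check_read(from, n);	// about to read n bytes from 'from'
 */
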
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN for this cache.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

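/*
 * Resulting object layout (for reference, derived from the code above):
 *
 *	| object | kasan_alloc_meta | kasan_free_meta (optional) | redzone |
 *
 * Free meta is stored out of line only for SLAB_TYPESAFE_BY_RCU caches,
 * caches with constructors, and objects too small to hold it in place;
 * otherwise free_meta_offset stays 0 and the free information lives
 * inside the freed object itself.
 */
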
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

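/*
 * Background (for reference): save_stack() stores the filtered trace in
 * the stack depot, which deduplicates identical traces and hands back a
 * small depot_stack_handle_t. Keeping a compact handle rather than the
 * full trace is what makes it cheap to record both an alloc and a free
 * stack in every slab object's metadata.
 */
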
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

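/*
 * Why quarantine (for reference): a true return tells the slab
 * allocator not to reuse the object yet. The object sits poisoned as
 * KASAN_KMALLOC_FREE in the quarantine until quarantine_reduce() or a
 * cache shrink drains it, which widens the window during which a
 * use-after-free touches poisoned memory and gets reported.
 */
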
bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

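/*
 * Worked example (illustrative): kmalloc(100, GFP_KERNEL) served from a
 * 128-byte cache unpoisons bytes 0..99, leaves shadow value 4 (100 % 8)
 * for the granule holding bytes 96..103, and poisons the granules from
 * byte 104 up to object_size as KASAN_KMALLOC_REDZONE. A read at offset
 * 100 is therefore detected even though it is still inside the 128-byte
 * slab object.
 */
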
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				    KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

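/*
 * For reference: unlike the linear mapping, whose shadow is mapped at
 * boot, module vmalloc space gets shadow on demand here, one shadow
 * byte per eight bytes of module area. VM_KASAN marks the module's
 * vm_struct so that kasan_free_shadow() below knows to vfree() the
 * shadow when the module area itself is freed.
 */
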
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			    KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			    padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			    KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);

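/*
 * Resulting alloca layout (for reference, derived from the code above):
 *
 *	| left redzone | object | partial granule | right redzone + padding |
 *
 * Both redzones are KASAN_ALLOCA_REDZONE_SIZE bytes; the padding rounds
 * the whole region up to a KASAN_ALLOCA_REDZONE_SIZE multiple so the
 * next alloca stays aligned, as the WARN_ON above expects.
 */
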
/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

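/*
 * For reference (the exact meaning of each value is defined by the
 * compiler, not by this file): these bytes follow the userspace ASan
 * convention for stack shadow markers, roughly 00 = addressable,
 * f1/f2/f3 = stack left/middle/right redzones, f5 = use after return,
 * f8 = use after scope. The compiler calls the matching helper when it
 * outlines shadow setup for large stack frames.
 */
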
#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the former is
	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(); if the pud is bad, that's because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen if we are onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow;
		 * a non-NULL result from find_vm_area() tells us that this
		 * was indeed the case.
		 *
		 * It's currently not possible to free shadow mapped during
		 * boot by kasan_init(), because the code to do that hasn't
		 * been written yet, so we simply leak that memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif