/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
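
/*
 * Worked example of the shadow encoding (illustrative, assuming the
 * generic KASAN_SHADOW_SCALE_SIZE of 8): kasan_unpoison_shadow(p, 13)
 * writes 0 to the first shadow byte (all 8 bytes of the first granule
 * are accessible) and 13 & KASAN_SHADOW_MASK == 5 to the second one
 * (only the first 5 bytes of that granule are accessible).  Poison
 * values such as KASAN_KMALLOC_REDZONE are negative when read as s8,
 * marking a granule entirely inaccessible.
 */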

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
	__kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX,
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
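
/*
 * For example (illustrative): with a shadow value of 5, the granule's
 * first 5 bytes are accessible.  An access at offset 4 gives
 * last_accessible_byte == 4 < 5 and passes; an access at offset 5
 * gives 5 >= 5 and is reported as poisoned.
 */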

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If a single shadow byte covers the 2-byte access, we
		 * don't need to do anything more.  Otherwise, test the
		 * first shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If a single shadow byte covers the 4-byte access, we
		 * don't need to do anything more.  Otherwise, test the
		 * first shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If a single shadow byte covers the 8-byte access, we
		 * don't need to do anything more.  Otherwise, test the
		 * first shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more.  Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}
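
/*
 * Scan example (illustrative): for a 21-byte shadow range starting at
 * an address with start % 8 == 3, memory_is_zero() checks 5 unaligned
 * prefix bytes individually, then two aligned 64-bit words, leaving no
 * tail bytes to check.
 */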

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
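
/*
 * Because the helpers above are __always_inline and size is typically
 * a compile-time constant, the poison check in a call such as
 * check_memory_region(addr, 4, false) collapses into a single
 * memory_is_poisoned_4() test with no runtime dispatch; a constant
 * size other than 1/2/4/8/16 trips BUILD_BUG().
 */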

static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, _RET_IP_);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, _RET_IP_);
}
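
/*
 * Note: the early bail-out in check_memory_region() catches addresses
 * below the shadow-covered range (e.g. userspace pointers or NULL
 * dereferences), for which kasan_mem_to_shadow() would itself point
 * outside the shadow mapping.
 */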

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	__asan_storeN((unsigned long)addr, len);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	__asan_loadN((unsigned long)src, len);
	__asan_storeN((unsigned long)dest, len);

	return __memcpy(dest, src, len);
}
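
/*
 * Illustrative effect of the interceptors above (hypothetical
 * snippet):
 *
 *	char *p = kmalloc(8, GFP_KERNEL);
 *	memcpy(p, src, 16);
 *
 * Here __asan_storeN((unsigned long)p, 16) finds redzone shadow behind
 * the 8-byte object and reports the overflow before __memcpy() ever
 * touches it.
 */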

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

#ifdef CONFIG_SLAB
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer
 * runtime: larger allocations get larger redzones.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}
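
/*
 * Examples of the policy above: a 32-byte object gets a 16-byte
 * redzone (32 <= 64 - 16), a 90-byte object gets 32 bytes
 * (90 <= 128 - 32), and a 4096-byte object gets 256 bytes, since 4096
 * only fits the (1 << 14) - 256 bucket.
 */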

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;

	/*
	 * Make sure the adjusted size is still less than
	 * KMALLOC_MAX_CACHE_SIZE.
	 * TODO: this check is only useful for SLAB, but not SLUB. We'll need
	 * to skip it for SLUB when it starts using kasan_cache_create().
	 */
	if (*size > KMALLOC_MAX_CACHE_SIZE -
	    sizeof(struct kasan_alloc_meta) -
	    sizeof(struct kasan_free_meta))
		return;

	*flags |= SLAB_KASAN;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_CACHE_SIZE,
		    max(*size,
			cache->object_size +
			optimal_redzone(cache->object_size)));
}
#endif

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		alloc_info->state = KASAN_STATE_INIT;
	}
#endif
}

#ifdef CONFIG_SLAB
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_free_meta *free_info =
			get_free_info(cache, object);
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);
		alloc_info->state = KASAN_STATE_FREE;
		set_track(&free_info->track, GFP_NOWAIT);
	}
#endif

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
	if (cache->flags & SLAB_KASAN) {
		struct kasan_alloc_meta *alloc_info =
			get_alloc_info(cache, object);

		alloc_info->state = KASAN_STATE_ALLOC;
		alloc_info->alloc_size = size;
		set_track(&alloc_info->track, flags);
	}
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);
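
/*
 * Redzone layout example for kasan_kmalloc() (illustrative, assuming
 * KASAN_SHADOW_SCALE_SIZE == 8): for size == 13 in a cache with
 * object_size == 32, redzone_start is object + 16 and redzone_end is
 * object + 32.  Bytes [0, 13) are unpoisoned (the second shadow byte
 * holds the partial value 5) and [16, 32) is marked
 * KASAN_KMALLOC_REDZONE, so accesses at offsets 13-15 and beyond are
 * all reported.
 */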

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
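
/*
 * Shadow sizing example (illustrative, KASAN_SHADOW_SCALE_SHIFT == 3):
 * a 1 MiB module mapping needs 1 MiB >> 3 == 128 KiB of shadow,
 * rounded up to whole pages.  VM_NO_GUARD avoids an unmapped guard
 * hole inside the shadow range.
 */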

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
			global->size_with_redzone - aligned_size,
			KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
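
/*
 * For reference, DEFINE_ASAN_LOAD_STORE(1) above expands (roughly) to:
 *
 *	void __asan_load1(unsigned long addr)
 *	{
 *		check_memory_region(addr, 1, false);
 *	}
 *	void __asan_store1(unsigned long addr)
 *	{
 *		check_memory_region(addr, 1, true);
 *	}
 *
 * plus the __asan_load1_noabort/__asan_store1_noabort aliases.  The
 * compiler inserts calls to these around every instrumented 1-byte
 * access.
 */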

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* To shut up compiler complaints. */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASAN doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif