Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 1 | /* |
| 2 | * This file contains shadow memory manipulation code. |
| 3 | * |
| 4 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. |
| 5 | * Author: Andrey Ryabinin <a.ryabinin@samsung.com> |
| 6 | * |
 * Some of the code is borrowed from https://github.com/xairy/linux by
 * Andrey Konovalov <adech.fo@gmail.com>
| 9 | * |
| 10 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of the GNU General Public License version 2 as |
| 12 | * published by the Free Software Foundation. |
| 13 | * |
| 14 | */ |
| 15 | |
| 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 17 | #define DISABLE_BRANCH_PROFILING |
| 18 | |
| 19 | #include <linux/export.h> |
| 20 | #include <linux/init.h> |
| 21 | #include <linux/kernel.h> |
| 22 | #include <linux/memblock.h> |
Andrey Ryabinin | 786a895 | 2015-02-13 14:39:21 -0800 | [diff] [blame] | 23 | #include <linux/memory.h> |
Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 24 | #include <linux/mm.h> |
| 25 | #include <linux/printk.h> |
| 26 | #include <linux/sched.h> |
| 27 | #include <linux/slab.h> |
| 28 | #include <linux/stacktrace.h> |
| 29 | #include <linux/string.h> |
| 30 | #include <linux/types.h> |
| 31 | #include <linux/kasan.h> |
| 32 | |
| 33 | #include "kasan.h" |
| 34 | |
| 35 | /* |
| 36 | * Poisons the shadow memory for 'size' bytes starting from 'addr'. |
| 37 | * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE. |
| 38 | */ |
| 39 | static void kasan_poison_shadow(const void *address, size_t size, u8 value) |
| 40 | { |
| 41 | void *shadow_start, *shadow_end; |
| 42 | |
| 43 | shadow_start = kasan_mem_to_shadow(address); |
| 44 | shadow_end = kasan_mem_to_shadow(address + size); |
| 45 | |
| 46 | memset(shadow_start, value, shadow_end - shadow_start); |
| 47 | } |
| 48 | |
| 49 | void kasan_unpoison_shadow(const void *address, size_t size) |
| 50 | { |
| 51 | kasan_poison_shadow(address, size, 0); |
| 52 | |
| 53 | if (size & KASAN_SHADOW_MASK) { |
| 54 | u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size); |
| 55 | *shadow = size & KASAN_SHADOW_MASK; |
| 56 | } |
| 57 | } |
| 58 | |
| 59 | |
/*
 * All functions below are always inlined so the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */
| 65 | |
| 66 | static __always_inline bool memory_is_poisoned_1(unsigned long addr) |
| 67 | { |
| 68 | s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); |
| 69 | |
| 70 | if (unlikely(shadow_value)) { |
| 71 | s8 last_accessible_byte = addr & KASAN_SHADOW_MASK; |
| 72 | return unlikely(last_accessible_byte >= shadow_value); |
| 73 | } |
| 74 | |
| 75 | return false; |
| 76 | } |
| 77 | |
/* Return true if any byte of the 2-byte access at 'addr' is poisoned. */
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	/* One u16 load covers the shadow of both granules the access could span. */
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		/* Validate the last byte of the access. */
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If addr + 1 is not at a granule start, both bytes lie
		 * in the same granule and the check above covered the
		 * first byte as well (lower offset, same shadow byte).
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		/*
		 * The access straddles a granule boundary: the first
		 * granule must be fully accessible, i.e. shadow == 0.
		 */
		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}
| 94 | |
/* Return true if any byte of the 4-byte access at 'addr' is poisoned. */
static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	/* One u16 load covers the shadow of both granules the access could span. */
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		/* Validate the last byte of the access. */
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * A last-byte offset >= 3 means all four bytes sit in a
		 * single granule, which the check above already covers
		 * (accessible bytes are a prefix of the granule).
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		/*
		 * The access crosses a granule boundary: the first
		 * granule must be fully accessible (shadow == 0).
		 */
		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}
| 111 | |
/* Return true if any byte of the 8-byte access at 'addr' is poisoned. */
static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	/* One u16 load covers the shadow of both granules the access could span. */
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		/* Validate the last byte of the access. */
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * Offset 7 for the last byte means 'addr' is granule
		 * aligned and the access is exactly one granule, which
		 * the check above already validated.
		 */
		if (likely(((addr + 7) & KASAN_SHADOW_MASK) >= 7))
			return false;

		/*
		 * The access crosses a granule boundary: the first
		 * granule must be fully accessible (shadow == 0).
		 */
		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}
| 128 | |
| 129 | static __always_inline bool memory_is_poisoned_16(unsigned long addr) |
| 130 | { |
| 131 | u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr); |
| 132 | |
| 133 | if (unlikely(*shadow_addr)) { |
| 134 | u16 shadow_first_bytes = *(u16 *)shadow_addr; |
| 135 | s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK; |
| 136 | |
| 137 | if (unlikely(shadow_first_bytes)) |
| 138 | return true; |
| 139 | |
| 140 | if (likely(!last_byte)) |
| 141 | return false; |
| 142 | |
| 143 | return memory_is_poisoned_1(addr + 15); |
| 144 | } |
| 145 | |
| 146 | return false; |
| 147 | } |
| 148 | |
| 149 | static __always_inline unsigned long bytes_is_zero(const u8 *start, |
| 150 | size_t size) |
| 151 | { |
| 152 | while (size) { |
| 153 | if (unlikely(*start)) |
| 154 | return (unsigned long)start; |
| 155 | start++; |
| 156 | size--; |
| 157 | } |
| 158 | |
| 159 | return 0; |
| 160 | } |
| 161 | |
/*
 * Scan [start, end); return the address of the first nonzero byte, or
 * 0 if the whole range is zero.  Uses 8-byte loads on the aligned
 * middle portion for speed; byte scans handle the unaligned head and
 * tail (and small ranges entirely).
 */
static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	/* Small ranges: a plain byte scan is cheapest. */
	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	/* Byte-scan up to the next 8-byte boundary. */
	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	/*
	 * Word-scan the aligned middle; on a hit, re-scan that word
	 * bytewise to pinpoint the offending byte's address.
	 */
	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	/* Remaining tail bytes (fewer than 8). */
	return bytes_is_zero(start, (end - start) % 8);
}
| 190 | |
/*
 * Return true if any byte of the 'size'-byte access at 'addr' is
 * poisoned.  Slow path for sizes with no dedicated helper.
 */
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	/* Scan every shadow byte covering [addr, addr + size). */
	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		/*
		 * A nonzero shadow byte is legitimate only when it is
		 * the very last one AND it encodes a partially
		 * accessible granule whose prefix still covers the
		 * access's final byte.
		 */
		if (unlikely(ret != (unsigned long)last_shadow ||
			((last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}
| 209 | |
| 210 | static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) |
| 211 | { |
| 212 | if (__builtin_constant_p(size)) { |
| 213 | switch (size) { |
| 214 | case 1: |
| 215 | return memory_is_poisoned_1(addr); |
| 216 | case 2: |
| 217 | return memory_is_poisoned_2(addr); |
| 218 | case 4: |
| 219 | return memory_is_poisoned_4(addr); |
| 220 | case 8: |
| 221 | return memory_is_poisoned_8(addr); |
| 222 | case 16: |
| 223 | return memory_is_poisoned_16(addr); |
| 224 | default: |
| 225 | BUILD_BUG(); |
| 226 | } |
| 227 | } |
| 228 | |
| 229 | return memory_is_poisoned_n(addr, size); |
| 230 | } |
| 231 | |
| 232 | |
| 233 | static __always_inline void check_memory_region(unsigned long addr, |
| 234 | size_t size, bool write) |
| 235 | { |
| 236 | struct kasan_access_info info; |
| 237 | |
| 238 | if (unlikely(size == 0)) |
| 239 | return; |
| 240 | |
| 241 | if (unlikely((void *)addr < |
| 242 | kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) { |
| 243 | info.access_addr = (void *)addr; |
| 244 | info.access_size = size; |
| 245 | info.is_write = write; |
| 246 | info.ip = _RET_IP_; |
| 247 | kasan_report_user_access(&info); |
| 248 | return; |
| 249 | } |
| 250 | |
| 251 | if (likely(!memory_is_poisoned(addr, size))) |
| 252 | return; |
| 253 | |
| 254 | kasan_report(addr, size, write, _RET_IP_); |
| 255 | } |
| 256 | |
Andrey Ryabinin | b8c73fc | 2015-02-13 14:39:28 -0800 | [diff] [blame^] | 257 | void kasan_alloc_pages(struct page *page, unsigned int order) |
| 258 | { |
| 259 | if (likely(!PageHighMem(page))) |
| 260 | kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order); |
| 261 | } |
| 262 | |
| 263 | void kasan_free_pages(struct page *page, unsigned int order) |
| 264 | { |
| 265 | if (likely(!PageHighMem(page))) |
| 266 | kasan_poison_shadow(page_address(page), |
| 267 | PAGE_SIZE << order, |
| 268 | KASAN_FREE_PAGE); |
| 269 | } |
| 270 | |
/*
 * Generate the out-of-line hooks the compiler calls before every
 * fixed-size (X-byte) read/write in instrumented code:
 * __asan_loadX()/__asan_storeX().  The *_noabort variants are plain
 * aliases of the same functions — presumably for compilers that emit
 * the noabort flavor; TODO confirm which toolchains call them.
 */
#define DEFINE_ASAN_LOAD_STORE(size)				\
	void __asan_load##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, false);		\
	}							\
	EXPORT_SYMBOL(__asan_load##size);			\
	__alias(__asan_load##size)				\
	void __asan_load##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_load##size##_noabort);		\
	void __asan_store##size(unsigned long addr)		\
	{							\
		check_memory_region(addr, size, true);		\
	}							\
	EXPORT_SYMBOL(__asan_store##size);			\
	__alias(__asan_store##size)				\
	void __asan_store##size##_noabort(unsigned long);	\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
| 294 | |
/* Hook for reads of a size with no dedicated __asan_loadX variant. */
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

/* Alias for compilers that emit the _noabort flavor of the hook. */
__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);
| 304 | |
/* Hook for writes of a size with no dedicated __asan_storeX variant. */
void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

/* Alias for compilers that emit the _noabort flavor of the hook. */
__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);
| 314 | |
/*
 * Deliberate no-op stub: instrumented code references this symbol, so
 * it must exist for the link to succeed ("to shut up compiler
 * complaints"), but KASan has nothing to do here.
 */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
Andrey Ryabinin | 786a895 | 2015-02-13 14:39:21 -0800 | [diff] [blame] | 318 | |
| 319 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 320 | static int kasan_mem_notifier(struct notifier_block *nb, |
| 321 | unsigned long action, void *data) |
| 322 | { |
| 323 | return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK; |
| 324 | } |
| 325 | |
/*
 * Warn at boot that memory hot-add is unsupported under KASan and
 * register the notifier that rejects MEM_GOING_ONLINE transitions.
 */
static int __init kasan_memhotplug_init(void)
{
	pr_err("WARNING: KASan doesn't support memory hot-add\n");
	pr_err("Memory hot-add will be disabled\n");

	/* Priority 0: no ordering requirement against other notifiers. */
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
| 337 | #endif |