#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

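/*
 * Map the shadow that covers one physically mapped pfn range, allocating
 * backing pages through vmemmap_populate().
 */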
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fast path. In some rare cases we
	 * could cross the boundary of the mapped shadow, so we just map some
	 * more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

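/*
 * Clear every PGD entry covering [start, end); used below to drop the
 * early zero shadow before the real shadow is built.
 */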
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		pgd_clear(pgd_offset_k(start));
}

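/*
 * Point every PGD entry covering the shadow region at the shared
 * kasan_zero_pud, so any shadow address reads as zero during early boot.
 */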
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}

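/*
 * With inline instrumentation the compiler dereferences shadow addresses
 * directly, so a wild pointer tends to show up as a general protection
 * fault; hook the die chain to print a hint about the likely cause.
 */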
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

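/*
 * Build a single zero-filled shadow hierarchy: every kasan_zero_pte entry
 * maps kasan_zero_page, every kasan_zero_pmd entry maps kasan_zero_pte,
 * and every kasan_zero_pud entry maps kasan_zero_pmd. Hooking it into both
 * top-level tables makes the whole shadow region read as zero until
 * kasan_init() installs the real shadow.
 */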
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}

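/*
 * Replace the early zero shadow with the final layout: real, writable
 * shadow for the direct mapping and the kernel image, shared read-only
 * zero shadow for everything that is never poisoned.
 */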
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

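	/*
	 * Run on a scratch copy of the top-level table so the early zero
	 * shadow stays mapped while the shadow entries of init_level4_pgt
	 * are cleared and repopulated below.
	 */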
	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

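	/* Zero shadow for everything below the direct mapping. */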
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

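	/* Real shadow for each mapped range of the direct mapping. */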
	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
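
	/*
	 * Zero shadow for the gap between the end of the direct mapping
	 * and the start of the kernel image mapping.
	 */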
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

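	/* Real shadow for the kernel image itself. */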
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

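	/* The rest of the address space, above MODULES_END, keeps zero shadow. */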
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

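	/* The real shadow is in place; switch back to init_level4_pgt. */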
	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write-protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to make sure the write protection is applied. */
	__flush_tlb_all();

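	/*
	 * kasan_depth is non-zero during early boot to suppress reports;
	 * clearing it turns KASAN reporting on.
	 */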
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}