#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

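/*
 * Populate real shadow memory for one range of mapped physical memory:
 * translate the pfn range to its shadow addresses and back them with
 * actual pages via vmemmap_populate(), so KASAN can store poison values
 * for these addresses.
 */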
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        return vmemmap_populate(start, end, NUMA_NO_NODE);
}

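/*
 * Unhook the early zero shadow from the top-level page table so the
 * whole shadow range can be repopulated from scratch.
 */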
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        pgd_t *pgd;

        for (; start < end; start += PGDIR_SIZE) {
                pgd = pgd_offset_k(start);
                /*
                 * With folded p4d, pgd_clear() is a nop; use p4d_clear()
                 * instead.
                 */
                if (CONFIG_PGTABLE_LEVELS < 5)
                        p4d_clear(p4d_offset(pgd, start));
                else
                        pgd_clear(pgd);
        }
}

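/*
 * Point every top-level entry covering the shadow region at the shared
 * zero page tables, so early boot code sees an all-zero ("unpoisoned")
 * shadow before real shadow memory can be allocated.
 */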
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                switch (CONFIG_PGTABLE_LEVELS) {
                case 4:
                        pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
                                        _KERNPG_TABLE);
                        break;
                case 5:
                        pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
                                        _KERNPG_TABLE);
                        break;
                default:
                        BUILD_BUG();
                }
                start += PGDIR_SIZE;
        }
}

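/*
 * With inline instrumentation the shadow check is emitted directly into
 * the compiled code, so a wild pointer makes the check itself fault:
 * NULL-pointer dereferences and user memory accesses show up as general
 * protection faults. Hook the die notifier to print a hint.
 */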
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif

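/*
 * Build the static zero shadow during early boot: every level of the
 * hierarchy points at a single shared table/page, and both the early
 * and the final top-level page tables map the whole shadow range
 * through it.
 */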
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
                kasan_zero_p4d[i] = __p4d(p4d_val);

        kasan_map_early_shadow(early_top_pgt);
        kasan_map_early_shadow(init_top_pgt);
}

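/*
 * Replace the early zero shadow with the real shadow layout: writable
 * shadow pages for the memory KASAN must be able to poison (the direct
 * mapping and the kernel image), and the shared read-only zero page for
 * everything else.
 */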
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

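        /*
         * Switch CR3 to early_top_pgt, a fresh copy of init_top_pgt, so
         * that init_top_pgt is not live while its shadow entries are
         * torn down and rebuilt below.
         */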
        memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
        load_cr3(early_top_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

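        /*
         * Addresses below the direct mapping never need real shadow;
         * back their shadow with the shared zero page.
         */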
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

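        /*
         * Allocate real shadow only for physical memory that is
         * actually present in the direct mapping (the e820-derived
         * pfn_mapped ranges).
         */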
        for (i = 0; i < E820_MAX_ENTRIES; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
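        /*
         * Between the end of the direct mapping and the kernel image
         * mapping, the zero shadow is again sufficient.
         */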
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)__START_KERNEL_map));

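        /*
         * The kernel image gets real, writable shadow so that its
         * globals can be poisoned.
         */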
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

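        /*
         * Shadow for the module area is allocated on demand when
         * modules are loaded; the tail of the shadow region past
         * MODULES_END needs only the zero shadow.
         */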
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

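        /*
         * The shadow is now fully populated: switch back to the final
         * top-level page table.
         */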
        load_cr3(init_top_pgt);
        __flush_tlb_all();

        /*
         * kasan_zero_page has been used as early shadow memory, so it may
         * contain some garbage. Now we can clear and write protect it, since
         * after the TLB flush no one should write to it.
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure that the write protection is applied. */
        __flush_tlb_all();

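        /*
         * Reports were suppressed during early boot via a non-zero
         * kasan_depth; clearing it arms KASAN error reporting.
         */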
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}