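/*
 * KASAN (KernelAddressSanitizer) shadow setup for x86-64: an early
 * zero-page shadow installed at boot, then the real shadow mapping
 * built in kasan_init().
 */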
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

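/* Populate real shadow pages for one range of directly-mapped memory. */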
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fastpath. In some rare cases
	 * we could cross the boundary of the mapped shadow, so just map
	 * some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

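/*
 * Tear down the early shadow: clear every top-level entry covering the
 * shadow region so it can be rebuilt with a real mapping.
 */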
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a no-op, so use
		 * p4d_clear() instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}

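/*
 * Point each top-level entry covering the shadow region at the shared
 * zero-shadow hierarchy, so every early shadow load reads kasan_zero_page.
 */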
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		switch (CONFIG_PGTABLE_LEVELS) {
		case 4:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
					_KERNPG_TABLE);
			break;
		case 5:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
					_KERNPG_TABLE);
			break;
		default:
			BUILD_BUG();
		}
		start += PGDIR_SIZE;
	}
}

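/*
 * With inline instrumentation a bad shadow access raises a general
 * protection fault, so hook the die chain to print a hint about the
 * likely cause.
 */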
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

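/*
 * Build the zero-shadow page tables (every level points at the single
 * kasan_zero_page) and hook them into both boot-time top-level tables,
 * so instrumented code can run before the real shadow exists.
 */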
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

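/* Replace the early zero shadow with the real shadow mapping. */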
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

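	/*
	 * Run on a scratch copy of the top-level table so the entries
	 * of init_top_pgt can be torn down and rebuilt while they are
	 * not in use.
	 */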
	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
	load_cr3(early_top_pgt);
	__flush_tlb_all();

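	/* Unhook the early zero shadow from init_top_pgt. */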
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

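	/*
	 * Kernel addresses below PAGE_OFFSET are never valid targets of
	 * instrumented accesses, so back their shadow with the shared
	 * zero page.
	 */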
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

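	/*
	 * Allocate real shadow for every range of physical memory in
	 * the direct mapping.
	 */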
	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
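
	/*
	 * The stretch between the end of the direct mapping and the
	 * start of the kernel image needs no tracking: zero shadow.
	 */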
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

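	/* Real shadow for the kernel image itself. */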
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

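	/*
	 * Shadow for the module area is allocated on demand when
	 * modules are loaded; everything from MODULES_END up maps to
	 * the zero shadow.
	 */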
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

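	/* Switch back to init_top_pgt, which now carries the real shadow. */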
	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it
	 * may contain some garbage. Now we can clear it and write-protect
	 * it, since after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure the write protection took effect. */
	__flush_tlb_all();

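	/*
	 * kasan_depth was set non-zero at boot to suppress reports while
	 * the shadow was not yet valid; zeroing it arms the reporting.
	 */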
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}