#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

extern unsigned char kasan_zero_page[PAGE_SIZE];

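/*
 * Populate real shadow memory for one range of the physical memory map.
 * Shadow addresses are derived from the linear mapping with
 * kasan_mem_to_shadow().
 */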
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in advance
	 * to slightly speed up fastpath. In some rare cases we could cross
	 * boundary of mapped shadow, so we just map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

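/*
 * Unmap the whole shadow region by clearing the PGD entries covering
 * [start, end); it is repopulated below in kasan_init().
 */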
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		pgd_clear(pgd_offset_k(start));
}

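/*
 * Used during early boot: point every PGD entry of the shadow region
 * at the shared kasan_zero_pud, so all shadow accesses land on
 * kasan_zero_page until the real shadow is built in kasan_init().
 */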
void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}

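/* Map [addr, end) read-only to kasan_zero_page, one PTE at a time. */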
static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	while (addr + PAGE_SIZE <= end) {
		WARN_ON(!pte_none(*pte));
		set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
					| __PAGE_KERNEL_RO));
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
	return 0;
}

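/*
 * Cover PMD-aligned, PMD-sized chunks by pointing the PMD at the shared
 * kasan_zero_pte table; a trailing partial chunk gets its own PTE page.
 */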
static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	int ret = 0;
	pmd_t *pmd = pmd_offset(pud, addr);

	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
		WARN_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
					| __PAGE_KERNEL_RO));
		addr += PMD_SIZE;
		pmd = pmd_offset(pud, addr);
	}
	if (addr < end) {
		if (pmd_none(*pmd)) {
			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;
			set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
		}
		ret = zero_pte_populate(pmd, addr, end);
	}
	return ret;
}

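/*
 * Same as zero_pmd_populate(), one level up: whole PUD-sized chunks use
 * the shared kasan_zero_pmd table, the remainder is handed down.
 */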
static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	int ret = 0;
	pud_t *pud = pud_offset(pgd, addr);

	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
		WARN_ON(!pud_none(*pud));
		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
					| __PAGE_KERNEL_RO));
		addr += PUD_SIZE;
		pud = pud_offset(pgd, addr);
	}

	if (addr < end) {
		if (pud_none(*pud)) {
			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;
			set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
		}
		ret = zero_pmd_populate(pud, addr, end);
	}
	return ret;
}

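/*
 * Top level of the zero-shadow walk: whole PGD-sized chunks map the
 * shared kasan_zero_pud read-only, the remainder is handed down.
 */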
static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
{
	int ret = 0;
	pgd_t *pgd = pgd_offset_k(addr);

	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
		WARN_ON(!pgd_none(*pgd));
		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
					| __PAGE_KERNEL_RO));
		addr += PGDIR_SIZE;
		pgd = pgd_offset_k(addr);
	}

	if (addr < end) {
		if (pgd_none(*pgd)) {
			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;
			set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
		}
		ret = zero_pud_populate(pgd, addr, end);
	}
	return ret;
}

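/*
 * Map a shadow range entirely to kasan_zero_page. Used for parts of the
 * address space that KASAN does not track, so a single shared read-only
 * zero page is enough. Failure is fatal: without shadow every
 * instrumented access would fault.
 */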
static void __init populate_zero_shadow(const void *start, const void *end)
{
	if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
		panic("kasan: unable to map zero shadow!");
}


#ifdef CONFIG_KASAN_INLINE
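/*
 * With inline instrumentation the compiler dereferences shadow
 * addresses directly, so a wild pointer tends to show up as a general
 * protection fault rather than a KASAN report; print a hint when that
 * happens.
 */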
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

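/*
 * Build the real shadow mapping: switch to a scratch copy of the kernel
 * page tables, throw away the early zero shadow, then populate zero
 * shadow for untracked regions and real shadow for the physical map and
 * kernel image. Finally switch back and enable error reporting by
 * clearing init_task.kasan_depth.
 */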
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_VADDR),
			(void *)KASAN_SHADOW_END);

	memset(kasan_zero_page, 0, PAGE_SIZE);

	load_cr3(init_level4_pgt);
	init_task.kasan_depth = 0;
}