// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

#ifdef CONFIG_X86_5LEVEL
/* Too early to use cpu_feature_enabled() */
#define pgtable_l5_enabled __pgtable_l5_enabled
#endif

#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

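/*
 * Allocate boot-time memory for shadow pages and page tables. When @panic
 * is true the allocation must succeed and memblock panics on failure;
 * otherwise a NULL return is tolerated and the caller falls back to a
 * smaller mapping.
 */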
static __init void *early_alloc(size_t size, int nid, bool panic)
{
	if (panic)
		return memblock_virt_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
	else
		return memblock_virt_alloc_try_nid_nopanic(size, size,
			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

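/*
 * Populate shadow for [addr, end) at PTE granularity. If the range covers
 * exactly one PMD-aligned, PMD-sized block and the CPU supports large
 * pages, try a single 2M mapping first and fall back to 4K pages if the
 * large allocation or pmd_set_huge() fails.
 */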
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

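/*
 * Populate one PUD entry: use a 1G page for a fully covered, PUD-aligned
 * range when the CPU supports gbpages, otherwise descend to PMD
 * granularity.
 */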
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

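/* Populate one P4D entry, then descend to the PUD level. */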
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

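/* Populate one PGD entry, then descend to the P4D level. */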
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

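/*
 * Map real (writable) shadow memory for [addr, end), walking the page
 * tables from the PGD down and allocating backing pages from memblock on
 * node @nid.
 */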
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

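/* Populate shadow for one range of directly mapped physical memory. */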
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

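/*
 * Unmap the early shadow: clear whole PGD entries where the range fully
 * covers them, then clear the P4D entries of the last, partially covered
 * PGD (see the layout comment in kasan_init()).
 */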
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a no-op, use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled)
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

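/*
 * Open-coded early variant of p4d_offset(): with 4-level paging the p4d
 * is folded into the pgd; with 5 levels the table's virtual address is
 * computed by hand via the kernel text mapping, since the usual helpers
 * are not usable this early in boot.
 */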
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled)
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

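/*
 * Wire up the early shadow for [addr, end): point an empty PGD entry at
 * kasan_zero_p4d and every empty P4D entry at kasan_zero_pud, so the
 * whole region reads as zero shadow before the real shadow is built.
 */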
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

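/* Install the early zero shadow over the whole shadow address range. */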
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

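/*
 * With inline instrumentation a wild shadow access typically shows up as
 * a general protection fault, so hook the die notifier to hint at the
 * likely cause.
 */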
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

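/*
 * Build the zero shadow tables (pte/pmd/pud/p4d levels all pointing at
 * kasan_zero_page) and hook them into both the early and the final
 * top-level page tables, so every shadow load returns zero until
 * kasan_init() builds the real shadow.
 */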
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

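/*
 * Replace the early zero shadow with the real shadow mapping: tear down
 * the early entries, populate read-only zero shadow for the unused holes,
 * and allocate real shadow pages for physical memory, the CPU entry area
 * and kernel text/data.
 */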
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * a bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
					PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
				kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
				(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection was applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}