// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

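/*
 * Boot-time allocator for shadow memory: hand out size-aligned,
 * node-local memory from memblock. Callers that cannot make progress
 * without the allocation pass panic == true.
 */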
static __init void *early_alloc(size_t size, int nid, bool panic)
{
	if (panic)
		return memblock_virt_alloc_try_nid(size, size,
				__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
	else
		return memblock_virt_alloc_try_nid_nopanic(size, size,
				__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

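/*
 * Populate one PMD worth of shadow: use a single 2M page when PSE is
 * available and the range spans a whole, aligned PMD; otherwise fall
 * back to individual 4K PTEs backed by freshly allocated pages.
 */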
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

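/*
 * Same idea one level up: map a 1G page when gbpages are supported and
 * the range covers a whole, aligned PUD; otherwise descend into the
 * PMDs.
 */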
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

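/* Allocate a PUD page for an empty p4d entry, then walk the PUDs. */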
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

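/* Allocate a P4D page for an empty pgd entry, then walk the P4Ds. */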
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

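/*
 * Populate real shadow pages for the [addr, end) range of shadow
 * addresses, allocating the backing memory on node 'nid'.
 */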
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

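/* Populate the shadow for one range of mapped physical memory. */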
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

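/*
 * Tear down the early shadow mapping: clear whole PGD entries where the
 * shadow owns them, then clear the P4Ds of the final, shared PGD entry.
 */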
static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With a folded p4d, pgd_clear() is a nop; use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

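/*
 * Resolve a p4d slot while we may still be running on early_top_pgt:
 * with 4-level paging the p4d is folded into the pgd; with 5-level
 * paging the table address has to be translated by hand.
 */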
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

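/*
 * Hook the early (zero) shadow into [addr, end): hang the zero p4d
 * table off empty PGD entries and point empty p4d entries at the zero
 * PUD table.
 */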
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

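/* Install the early shadow across the whole shadow address range. */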
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
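/*
 * With inline instrumentation a bogus shadow access typically ends in a
 * general protection fault rather than a KASAN report, so print a hint
 * about the likely cause when such a fault takes the kernel down.
 */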
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

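/*
 * Build the zero shadow page tables, with every level pointing down at
 * the single zero page, and hook them into both boot-time top-level
 * page tables.
 */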
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

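/*
 * Switch from the early zero shadow to the real one: populate writable
 * shadow for mapped physical memory, the CPU entry area and the kernel
 * image, and leave everything else mapped to the zero page.
 */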
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share its PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry, where it collides
	 * with a bunch of things such as kernel code, modules, the EFI
	 * mapping, etc. We need to take extra steps not to overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

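	/*
	 * The shadow bounds of the CPU entry area are not page-aligned, so
	 * round them outwards before populating.
	 */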
	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
					PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
				kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write-protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that the write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}