/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

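/*
 * Temporary pgd: kasan_init() points TTBR1 at a copy of swapper_pg_dir
 * held here so that instrumented code still has a live shadow mapping
 * while the real shadow entries in swapper_pg_dir are torn down and
 * rebuilt.
 */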
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be
 * used directly on kernel symbols (bm_p*d). All the early functions are
 * called too early to use lm_alias, so the __p*d_populate functions must be
 * used instead, with physical addresses obtained from __pa_symbol.
 */

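/*
 * The early shadow is backed entirely by the single kasan_zero_page:
 * each level reuses the statically allocated kasan_zero_{pte,pmd,pud}
 * tables, so nothing needs to be allocated this early in boot.
 */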
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);

	pmd = pmd_offset_kimg(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		__pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);

	pud = pud_offset_kimg(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}

static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

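/*
 * Called from head.S (hence asmlinkage) before start_kernel(), so only
 * the statically allocated tables above may be used.
 *
 * The first BUILD_BUG_ON below encodes the shadow mapping invariant for
 * a scale shift of 3: shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET,
 * so the shadow of the top of the address space, (1UL << 64) >> 3 ==
 * (1UL << 61), must land exactly on KASAN_SHADOW_END.
 */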
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}

/*
 * Copy the current shadow region into a new pgdir.
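 *
 * The entries are copied verbatim with set_pgd() so that the shadow
 * stays reachable when the new pgdir is installed.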
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here because it is
	 * a no-op on 2- and 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

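/*
 * Replace the early, zero-page-backed shadow with the real thing:
 * allocate writable shadow pages for the kernel image and for all
 * memory known to memblock, and map every remaining shadow range to
 * the single read-only zero page.
 */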
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory. First we
	 * unmap the early shadow (the clear_pgds() call below). However,
	 * instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow
		 * bytes in advance to slightly speed up the fast path,
		 * and in some rare cases could cross the boundary of
		 * the mapped shadow, so map a little extra here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_zero_pte directly, so we
	 * must make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

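	/*
	 * kasan_zero_page served as writable early shadow, so it may
	 * contain stale poison values; scrub it so that everything it
	 * still covers reads back as unpoisoned.
	 */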
	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}