/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as zero shadow to cover large ranges of memory
 *     that are allowed to be accessed, but not handled by kasan
 *     (vmalloc/vmemmap ...).
 */
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

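/*
 * Boot-time allocator for page-table pages: returns zeroed, size-aligned
 * memory from memblock on the requested node, placed above
 * MAX_DMA_ADDRESS so the DMA zones are left untouched.
 */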
static __init void *early_alloc(size_t size, int node)
{
	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					BOOTMEM_ALLOC_ACCESSIBLE, node);
}

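/*
 * Point every pte covering [addr, end) at the shared kasan_zero_page.
 * The mapping is write-protected, so the whole range reads back as zero
 * shadow and any stray write to it faults.
 */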
static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

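/*
 * Walk the pmd entries covering [addr, end). A fully covered, PMD-aligned
 * block is wired straight to the shared kasan_zero_pte table; a partially
 * covered block gets a freshly allocated pte table, which is then filled
 * in by zero_pte_populate().
 */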
static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pmd_populate_kernel(&init_mm, pmd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

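/*
 * Same walk one level up: a fully covered, PUD-aligned block shares the
 * pre-built kasan_zero_pmd/kasan_zero_pte tables; anything else falls
 * through to zero_pmd_populate().
 */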
static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pud_populate(&init_mm, pud,
				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/**
 * kasan_populate_zero_shadow - populate shadow memory region with
 *                              kasan_zero_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
void __init kasan_populate_zero_shadow(const void *shadow_start,
				const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_zero_pud should already be populated
			 * with pmds at this point. The [pud,pmd]_populate*()
			 * calls below are only needed for 3- and 2-level
			 * page tables, where there are no real puds/pmds,
			 * so pgd_populate()/pud_populate() are no-ops.
			 */
			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
			pud = pud_offset(pgd, addr);
			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
			continue;
		}

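		/*
		 * Partially covered pgd: allocate a real pud table and
		 * let zero_pud_populate() handle the lower levels.
		 */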
		if (pgd_none(*pgd)) {
			pgd_populate(&init_mm, pgd,
				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
153}