// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

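/*
 * Allocate @size bytes (aligned to @size) of boot memory from node
 * @nid, above MAX_DMA_ADDRESS.  Returns NULL on failure rather than
 * panicking, which lets the huge-page paths below fall back to
 * smaller mappings.
 */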
static __init void *early_alloc(size_t size, int nid)
{
	return memblock_virt_alloc_try_nid_nopanic(size, size,
		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

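/*
 * Populate the shadow ptes under @pmd for [addr, end).  When the range
 * spans exactly one pmd-aligned PMD_SIZE block and the CPU has 2M
 * pages, try to map it with a single huge page first and fall back to
 * 4K pages if that fails.  Entries that are already populated are
 * skipped.
 */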
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

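/*
 * Same idea as kasan_populate_pmd(), one level up: use a 1G page when
 * gbpages are available and the range covers a whole, aligned pud,
 * otherwise descend into the pmds.
 */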
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

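/* Make sure the pud table exists, then populate each pud in range. */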
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

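/* Make sure the p4d table exists, then populate each p4d in range. */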
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

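/*
 * Populate real shadow memory for [addr, end), allocating backing
 * pages from node @nid and walking the kernel page tables from the
 * pgd down.
 */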
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

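/*
 * Populate real shadow for the direct mapping of one pfn range, using
 * memory from the node that owns the range.
 */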
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

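/*
 * Unmap the early (zero) shadow between @start and @end.  Whole PGD
 * entries are cleared directly; the final, partial PGD entry is
 * cleared one p4d at a time, since with 5-level paging it is shared
 * with other mappings (see comment in kasan_init()).
 */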
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop, use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

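/*
 * Early variant of p4d_offset(): p4d_offset() goes through __va(), and
 * at this point the direct mapping isn't usable yet, so translate the
 * p4d table's physical address through the kernel text mapping
 * (__START_KERNEL_map - phys_base) by hand.  With 4-level paging the
 * p4d level is folded into the pgd.
 */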
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

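/*
 * Hook the early zero shadow into [addr, end): point an empty pgd at
 * kasan_zero_p4d and empty p4d entries at kasan_zero_pud, leaving
 * already-populated entries alone.
 */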
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

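/*
 * Map the whole [KASAN_SHADOW_START, KASAN_SHADOW_END) region to the
 * zero shadow in the given top-level page table.
 */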
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

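/*
 * With inline instrumentation, a bad shadow access is a plain wild
 * memory dereference and typically dies with a general protection
 * fault.  Hook the die chain so the report hints that such a GPF may
 * really be a NULL-ptr deref or a user-memory access caught by KASAN.
 */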
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

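/*
 * Build the early (zero) shadow: every pte in kasan_zero_pte points at
 * the single kasan_zero_page, every pmd at the shared pte table, and
 * so on up the levels.  Then hook these tables into both the early and
 * the final kernel page tables so that every shadow access is backed
 * by memory from the start.
 */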
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

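/*
 * Replace the early zero shadow with the real layout: switch to the
 * early page tables, unmap the zero shadow, populate real shadow for
 * the direct mapping and the kernel image, back everything else with
 * the (now cleared and write-protected) zero page, and switch back to
 * the final page tables.
 */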
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share its PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry, where it collides
	 * with a bunch of things: kernel code, modules, the EFI mapping,
	 * etc.  We need to take extra steps to not overwrite them.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage.  Now we can clear and write-protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush the TLBs again to make sure the write protection has taken effect. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}