/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pfn.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as zero shadow to cover large ranges of memory
 *     that are allowed to be accessed, but are not handled by kasan
 *     (vmalloc/vmemmap ...).
 */
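/*
 * Background note (general KASAN behaviour, not specific to this file):
 * each shadow byte describes KASAN_SHADOW_SCALE_SIZE (8) bytes of memory,
 * and a shadow value of zero means the whole granule is accessible.
 * That is why a single page of zeroes, mapped read-only over and over,
 * can back arbitrarily large "always accessible" regions.
 */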
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;

#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

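/*
 * Allocate zeroed boot memory, aligned to its own size, so that
 * PAGE_SIZE allocations come back page aligned as page tables require.
 */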
static __init void *early_alloc(size_t size, int node)
{
	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					BOOTMEM_ALLOC_ACCESSIBLE, node);
}

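/*
 * Map [addr, end), which lies within a single pmd, to kasan_zero_page
 * using write-protected ptes.
 */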
static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

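/*
 * Walk the pmds covering [addr, end). Pmds that are fully covered are
 * pointed straight at the shared kasan_zero_pte table; partially covered
 * ones get a freshly allocated pte page (if they have none yet), which is
 * then filled by zero_pte_populate().
 */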
static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
			continue;
		}

		if (pmd_none(*pmd)) {
			pmd_populate_kernel(&init_mm, pmd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

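/*
 * Same idea as zero_pmd_populate(), one level up: fully covered puds
 * reuse the shared kasan_zero_pmd/kasan_zero_pte tables, partially
 * covered ones fall through to zero_pmd_populate().
 */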
static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud, kasan_zero_pmd);
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
			continue;
		}

		if (pud_none(*pud)) {
			pud_populate(&init_mm, pud,
				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

/**
 * kasan_populate_zero_shadow - populate shadow memory region with
 *                              kasan_zero_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
void __init kasan_populate_zero_shadow(const void *shadow_start,
				const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_zero_pud should already be populated with
			 * pmds at this point.
			 * The [pud,pmd]_populate*() calls below are only
			 * needed for 3- and 2-level page tables, where we
			 * don't have real puds/pmds, so pgd_populate() and
			 * pud_populate() are no-ops there.
			 */
			pgd_populate(&init_mm, pgd, kasan_zero_pud);
			pud = pud_offset(pgd, addr);
			pud_populate(&init_mm, pud, kasan_zero_pmd);
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
			continue;
		}

		if (pgd_none(*pgd)) {
			pgd_populate(&init_mm, pgd,
				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
		}
		zero_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
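
/*
 * Illustrative (hypothetical) usage from an architecture's kasan_init(),
 * covering a region that only ever needs the read-only zero shadow:
 *
 *	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
 *				   kasan_mem_to_shadow((void *)VMALLOC_END));
 *
 * Writable shadow for normal kernel memory is populated separately by the
 * architecture code.
 */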