/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates exploits
 * relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done at the PGD & P4D/PUD page table levels to increase the number of
 * possible addresses. The physical memory mapping code was adapted to
 * support P4D/PUD level virtual addresses. On the best configuration, this
 * implementation provides on average 30,000 possible virtual addresses for
 * each memory region. An additional low memory page is used to ensure each
 * CPU can start with a PGD aligned virtual address (for realmode).
 *
 * The order of each memory region is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and space between each. The size of the
 * physical memory mapping is the available physical memory.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

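/* Region sizes below are expressed in terabytes: 1UL << TB_SHIFT == 1 TB. */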
#define TB_SHIFT 40

/*
 * Virtual address start and end range for randomization. The end changes
 * based on the configuration to give the highest amount of space for
 * randomization. It increases the possible random position for each
 * randomized region.
 *
 * You need to add an #ifdef entry if you introduce a new memory region
 * compatible with KASLR. Your entry must be in logical order with the memory
 * layout. For example, ESPFIX is before EFI because its virtual address is
 * lower. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory()
 * to ensure that this order is correct and won't be changed.
 */
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;

#if defined(CONFIG_X86_ESPFIX64)
static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
#elif defined(CONFIG_EFI)
static const unsigned long vaddr_end = EFI_VA_END;
#else
static const unsigned long vaddr_end = __START_KERNEL_map;
#endif

/* Default values */
unsigned long page_offset_base = __PAGE_OFFSET_BASE;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base = __VMALLOC_BASE;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base = __VMEMMAP_BASE;
EXPORT_SYMBOL(vmemmap_base);

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
	{ &vmalloc_base, VMALLOC_SIZE_TB },
	{ &vmemmap_base, 1 },
};
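
/*
 * For example, with a 46-bit physical address space (4-level paging),
 * the physical mapping entry above covers 1 << (46 - 40) == 64 TB.
 */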

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}
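/* e.g. for the vmemmap entry above (size_tb == 1) this returns 1UL << 40. */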

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr = vaddr_start;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is
	 * consistent with the vaddr_start/vaddr_end variables.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
		     vaddr_end >= EFI_VA_END);
	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
		      IS_ENABLED(CONFIG_EFI)) &&
		     vaddr_end >= __START_KERNEL_map);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	/*
	 * Update the physical memory mapping size to the available memory,
	 * plus padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
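	/*
	 * For example, 8 GB of RAM rounds up to 1 TB here; assuming the
	 * default CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING of 0xa, that
	 * gives memory_tb == 11.
	 */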

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
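	/*
	 * Whatever is left of the address range after all regions have
	 * been placed is handed out as random gaps between them.
	 */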

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
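		/*
		 * Mask the offset down to a P4D (512 GB) or PUD (1 GB)
		 * boundary so the region base stays aligned for the page
		 * table level the randomization operates on.
		 */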
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
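		/*
		 * The +1 above ensures round_up() always advances to the
		 * next boundary, even when vaddr is already aligned, which
		 * provides the minimum padding between regions.
		 */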
		remain_entropy -= entropy;
	}
}

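/*
 * Populate a trampoline PUD page with copies of the kernel's direct
 * mapping entries, indexed by physical address, so real mode startup can
 * reach low memory through a non-randomized, PGD-aligned mapping.
 */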
static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}

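/* As above, but copying P4D entries when 5-level paging is enabled. */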
static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. It consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_5LEVEL))
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}