/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap) to mitigate exploits relying on predictable
 * kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on the PGD & PUD page table levels to increase the number of
 * possible addresses. The physical memory mapping code was adapted to
 * support PUD level virtual addresses. In the best configuration, this
 * implementation provides about 30,000 possible virtual addresses on
 * average for each memory region. An additional low memory page is used
 * to ensure each CPU can start with a PGD aligned virtual address (for
 * realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base of and the space between each. The size
 * of the physical memory mapping region is the available physical memory.
 */
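
/*
 * Illustrative entropy arithmetic for the figures above (a sketch assuming
 * the classic 4-level paging constants of this era, not values computed by
 * this file): the randomization range spans __PAGE_OFFSET_BASE
 * (0xffff880000000000) to VMEMMAP_START (0xffffea0000000000), about 98TB.
 * With roughly 11TB kept for the physical mapping and 32TB for vmalloc,
 * about 55TB of slack remains; divided between the two regions and aligned
 * to the 1GB PUD_SIZE, that yields roughly 28,000 possible base addresses
 * per region, consistent with the ~30,000 average quoted above.
 */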

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

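/* Region sizes are expressed in terabytes: 1TB is 1UL << TB_SHIFT bytes. */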
#define TB_SHIFT 40

/*
 * Virtual address start and end range for randomization. The end is chosen
 * based on the configuration to provide the highest amount of space for
 * randomization, which increases the possible random positions for each
 * randomized region.
 *
 * You need to add an ifdef entry if you introduce a new memory region
 * compatible with KASLR. Your entry must be in logical order with the
 * memory layout: for example, ESPFIX comes before EFI because its virtual
 * address is lower. You also need to add a BUILD_BUG_ON() in
 * kernel_randomize_memory() to ensure that this order is correct and won't
 * be changed (see the illustrative sketch after kaslr_regions below).
 */
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
static const unsigned long vaddr_end = VMEMMAP_START;

/* Default values */
unsigned long page_offset_base = __PAGE_OFFSET_BASE;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base = __VMALLOC_BASE;
EXPORT_SYMBOL(vmalloc_base);

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 64 /* Maximum */ },
	{ &vmalloc_base, VMALLOC_SIZE_TB },
};
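
/*
 * Illustrative sketch of extending kaslr_regions as described above (the
 * names are hypothetical, not part of this file): a new KASLR-compatible
 * region would be appended in virtual address order,
 *
 *	{ &new_region_base, NEW_REGION_SIZE_TB },
 *
 * together with an ordering check in kernel_randomize_memory(), mirroring
 * the existing BUG_ON() that pins page_offset_base as the first entry.
 */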

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr = vaddr_start;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	if (!kaslr_memory_enabled())
		return;

	/*
	 * Update the physical memory mapping size to match the available
	 * memory, and add padding if needed (especially for memory
	 * hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
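
	/*
	 * Illustrative arithmetic (a sketch; the actual padding comes from
	 * the Kconfig value): with 4GB of RAM, max_pfn << PAGE_SHIFT is
	 * 4GB, DIV_ROUND_UP rounds that up to 1TB, and a padding of 10TB
	 * would make memory_tb 11TB, well below the 64TB maximum of the
	 * physical mapping region.
	 */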

	/* Adapt the physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available: the remaining entropy is divided evenly among
		 * the regions still to be placed, and the chosen offset is
		 * aligned down to a PUD boundary.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment, guaranteeing at least
		 * PUD_SIZE of separation between consecutive regions.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

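	/*
	 * Copy the PUD entries that map the first 512GB (one PGD-sized
	 * chunk with 4-level paging) of the kernel direct mapping into the
	 * trampoline PUD page, so low memory stays reachable through the
	 * trampoline page table wherever KASLR placed the direct mapping.
	 */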
	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}