/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * isspace() in linux/ctype.h is expected by next_arg() to filter
 * out "space/lf/tab". boot/ctype.h conflicts with linux/ctype.h
 * because isdigit() is implemented in both of them, so disable the
 * boot/ version here.
 */
#define BOOT_CTYPE_H

/*
 * _ctype[] in lib/ctype.c is needed by isspace() in linux/ctype.h.
 * However, both lib/ctype.c and lib/cmdline.c pull in EXPORT_SYMBOL,
 * which is meaningless here and can cause compile errors in some
 * cases. So do not include linux/export.h, and define EXPORT_SYMBOL(sym)
 * as empty.
 */
#define _LINUX_EXPORT_H
#define EXPORT_SYMBOL(sym)

#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsrelease.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

extern unsigned long get_cmd_line_ptr(void);

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
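
/*
 * Illustrative note: on 64-bit x86, sizeof(hash) == 8, so each
 * iteration above rotates the running hash right by 7 bits
 * (equivalently, left by 57) before XORing in the next word; trailing
 * bytes that do not fill a whole unsigned long are ignored.
 */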

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"

struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS	4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
unsigned long long mem_limit = ULLONG_MAX;

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}
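
/*
 * For example (illustrative values): [0x100000, +0x10000) overlaps
 * [0x108000, +0x10000) but not [0x110000, +0x10000); the ranges are
 * half-open, so merely touching at an endpoint is not an overlap.
 */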

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"

static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies a usable region and is skipped. */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * If no offset is given (only a size), memmap=nn[KMG]
		 * behaves like mem=nn[KMG]: it limits the maximum address
		 * the system can use, and regions above that limit must
		 * be avoided.
		 */
		*start = 0;
		return 0;
	}

	return -EINVAL;
}
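
/*
 * Illustrative parses (example values only):
 *   "memmap=64K$0x100000"  -> *size = 64K, *start = 1M (region to avoid)
 *   "memmap=512M"          -> *size = 512M, *start = 0 (memory limit)
 *   "memmap=32M@0x1000000" -> *size = 0, *start = 0    (usable; skipped)
 */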

static void mem_avoid_memmap(char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than 4 memmaps, fail kaslr */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}
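
/*
 * For example, "memmap=16M$0x1000000,16M$0x2000000" (illustrative
 * values) records two regions to avoid; if regions beyond
 * MAX_MEMMAP_REGIONS remain unparsed, memmap_too_large is set and
 * physical KASLR is aborted.
 */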

static int handle_mem_memmap(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len = strlen(args);
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
		return 0;

	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0) {
			warn("Only '--' specified in cmdline");
			free(tmp_cmdline);
			return -1;
		}

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0) {
				free(tmp_cmdline);
				return -EINVAL;
			}
			mem_limit = mem_size;
		}
	}

	free(tmp_cmdline);
	return 0;
}
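
/*
 * Example behaviour (illustrative values): a cmdline containing
 * "mem=1G memmap=16M$0x2000000" sets mem_limit to 1G via the "mem"
 * branch above and records one region to avoid via mem_avoid_memmap().
 */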

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is less obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc. of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0     output            input                      input+input_size output+init_size
 * |     |                 |                          |                |
 * |     |                 |                          |                |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |              |
 *                |                       |              |
 * output+init_size-ZO_INIT_SIZE output+output_size output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_memmap();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}
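
/*
 * Worked example (illustrative numbers): with CONFIG_PHYSICAL_ALIGN of
 * 2M, an aligned 10M region and a 4M image give (10M - 4M) / 2M + 1 = 4
 * slots, i.e. candidate load addresses at +0M, +2M, +4M and +6M from
 * the region base.
 */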

static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
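
/*
 * For instance (illustrative numbers): with two slot areas of 4 and 6
 * slots, slot_max is 10; a random draw of 7 skips the first area
 * (7 - 4 = 3) and resolves to the second area's base address plus
 * 3 * CONFIG_PHYSICAL_ALIGN.
 */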

static void process_e820_entry(struct boot_e820_entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, overlap;
	unsigned long start_orig, end;
	struct boot_e820_entry cur_entry;

	/* Skip non-RAM entries. */
	if (entry->type != E820_TYPE_RAM)
		return;

	/* On 32-bit, ignore entries entirely above our maximum. */
	if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	/* Ignore entries above the memory limit. */
	end = min(entry->size + entry->addr, mem_limit);
	if (entry->addr >= end)
		return;
	cur_entry.addr = entry->addr;
	cur_entry.size = end - entry->addr;

	region.start = cur_entry.addr;
	region.size = cur_entry.size;

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		start_orig = region.start;

		/* Potentially raise address to minimum location. */
		if (region.start < minimum)
			region.start = minimum;

		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above this e820 region? */
		if (region.start > cur_entry.addr + cur_entry.size)
			return;

		/* Reduce size by any delta from the original address. */
		region.size -= region.start - start_orig;

		/* On 32-bit, reduce region size to fit within max size. */
		if (IS_ENABLED(CONFIG_X86_32) &&
		    region.start + region.size > KERNEL_IMAGE_SIZE)
			region.size = KERNEL_IMAGE_SIZE - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			store_slot_info(&region, image_size);
			return;
		}

		/* Store beginning of region if it holds at least image_size. */
		if (overlap.start > region.start + image_size) {
			struct mem_vector beginning;

			beginning.start = region.start;
			beginning.size = overlap.start - region.start;
			store_slot_info(&beginning, image_size);
		}

		/* Return if overlap extends to or past end of region. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off the overlapping region and start over. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}

static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	int i;

	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		process_e820_entry(&boot_params->e820_table[i], minimum,
				   image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820 scan (slot_areas full)!\n");
			break;
		}
	}

	return slots_fetch_random();
}

static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * Count how many CONFIG_PHYSICAL_ALIGN-sized slots can hold
	 * image_size within the range from minimum to KERNEL_IMAGE_SIZE.
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
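
/*
 * Illustrative arithmetic (example values only): with minimum = 16M,
 * KERNEL_IMAGE_SIZE = 1G, a 32M image and CONFIG_PHYSICAL_ALIGN of 2M,
 * there are (1024M - 16M - 32M) / 2M + 1 = 489 candidate virtual slots.
 */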

/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	/* By default, keep output position unchanged. */
	*virt_addr = *output;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk e820 and find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * This loads the identity mapping page table.
		 * This should only be done if a new physical address
		 * is found for the kernel; otherwise we keep the old
		 * page table so behaviour matches the "nokaslr" case.
		 */
		finalize_identity_maps();
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
Kees Cook8ab38202013-10-10 17:18:14 -0700677}