/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
        cr_alignment = cr_alignment & ~mask;
        return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
        phys_addr_t start;
        unsigned long size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);
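
/*
 * Example (illustrative values): booting with "initrd=0x10800000,8M" on
 * the kernel command line makes memparse() above record 0x10800000 in
 * phys_initrd_start and 8 MiB (0x800000 bytes) in phys_initrd_size.
 */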

static int __init parse_tag_initrd(const struct tag *tag)
{
        pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        *max_low = PFN_DOWN(memblock_get_current_limit());
        *min = PFN_UP(memblock_start_of_DRAM());
        *max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
        arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

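/*
 * Worked example (hypothetical values): an mdesc with dma_zone_size of
 * SZ_64M on a platform with PHYS_OFFSET 0x80000000 gives arm_dma_limit =
 * 0x83ffffff, confining GFP_DMA allocations to the first 64 MiB of RAM;
 * arm_adjust_dma_zone() above then carves those 64 MiB out of
 * zone_size[0] as ZONE_DMA and leaves the remainder in ZONE_NORMAL.
 */
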
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}

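/*
 * Worked example (hypothetical PFNs): two banks spanning 0x60000-0x70000
 * and 0x80000-0x90000, with min = 0x60000 and max_low = 0x90000, give
 * zone_size[0] = 0x30000 pages; the loop above subtracts each bank's
 * 0x10000 pages, leaving zhole_size[0] = 0x10000 pages for the gap
 * between the banks.
 */
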
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}

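/*
 * Typical use (illustrative): a platform's mdesc->reserve() hook may call
 * arm_memblock_steal(SZ_1M, SZ_1M) to take a naturally aligned megabyte
 * out of the kernel's view of memory entirely; the BUG_ON() enforces that
 * this only happens before arm_memblock_init() clears
 * arm_memblock_steal_permitted below.
 */
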
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        phys_addr_t start;
        unsigned long size;

        /* FDT scan will populate initrd_start */
        if (initrd_start && !phys_initrd_size) {
                phys_initrd_start = __virt_to_phys(initrd_start);
                phys_initrd_size = initrd_end - initrd_start;
        }

        initrd_start = initrd_end = 0;

        if (!phys_initrd_size)
                return;

        /*
         * Round the memory region to page boundaries as per free_initrd_mem()
         * This allows us to detect whether the pages overlapping the initrd
         * are in use, but more importantly, reserves the entire set of pages
         * as we don't want these pages allocated for other purposes.
         */
        start = round_down(phys_initrd_start, PAGE_SIZE);
        size = phys_initrd_size + (phys_initrd_start - start);
        size = round_up(size, PAGE_SIZE);

        if (!memblock_is_region_memory(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        if (memblock_is_region_reserved(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        memblock_reserve(start, size);

        /* Now convert initrd to virtual addresses */
        initrd_start = __phys_to_virt(phys_initrd_start);
        initrd_end = initrd_start + phys_initrd_size;
#endif
}

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
        /* Register the kernel text, kernel data and initrd with memblock. */
        memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

        arm_initrd_init();

        arm_mm_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        /* reserve memory for DMA contiguous allocations */
        dma_contiguous_reserve(arm_dma_limit);

        arm_memblock_steal_permitted = false;
        memblock_dump_all();
}

void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        memblock_allow_resize();
        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        early_memtest((phys_addr_t)min << PAGE_SHIFT,
                      (phys_addr_t)max_low << PAGE_SHIFT);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        zone_sizes_init(min, max_low, max_high);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         */
        min_low_pfn = min;
        max_low_pfn = max_low;
        max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        phys_addr_t pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_memblock(memory, reg) {
                start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                start = min(start,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

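/*
 * Worked example (hypothetical PFNs): if one bank ends at PFN 0x70000 and
 * the next starts at 0x80000, the struct page entries for the 0x10000
 * intervening frames describe memory that does not exist, so
 * free_memmap(0x70000, 0x80000) hands the pages backing that slice of
 * mem_map back to the allocator (subject to the MAX_ORDER / section
 * alignment applied above).
 */
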
#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
        for (; pfn < end; pfn++)
                free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                if (memblock_is_nomap(mem))
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                free_area_high(start, res_start);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        free_area_high(start, end);
        }
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap();
        free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

        free_highpages();

        mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
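/*
 * Editorial note: each helper above expands to "base, top, size" for one
 * line of the layout dump below; e.g. MLM(PAGE_OFFSET, high_memory)
 * supplies the lowmem base, top, and size in MiB ((t - b) >> 20), and
 * MLK_ROUNDUP reports the linker-section sizes in KiB, rounded up.
 */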

        pr_notice("Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

                        MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_END),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
                        MLM(MODULES_VADDR, MODULES_END),
#endif

                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
        const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
        pmdval_t prot;
        pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
        /* Make page tables, etc before _stext RW (set NX). */
        {
                .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make init RW (set NX). */
        {
                .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
        /* Make rodata NX (set RO in ro_perms below). */
        {
                .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata_section_aligned,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
};

static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
                .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
                .mask   = ~L_PMD_SECT_RDONLY,
                .prot   = L_PMD_SECT_RDONLY,
#else
                .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .clear  = PMD_SECT_AP_WRITE,
#endif
        },
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
        if (addr & SECTION_SIZE)
                pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
        else
                pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
        flush_pmd_entry(pmd);
        local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

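/*
 * A note on the non-LPAE case above (illustrative): with classic 2-level
 * page tables, a Linux pmd_t pair covers 2 MiB as two 1 MiB section
 * entries, so bit 20 of the address (addr & SECTION_SIZE) selects pmd[1]
 * for the odd megabyte and pmd[0] for the even one; under LPAE a pmd is
 * a single 2 MiB block entry, so only pmd[0] is rewritten.
 */
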
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
        if (cpu_architecture() < CPU_ARCH_ARMv6)
                return false;

        return !!(get_cr() & CR_XP);
}

void set_section_perms(struct section_perm *perms, int n, bool set,
                       struct mm_struct *mm)
{
        size_t i;
        unsigned long addr;

        if (!arch_has_strict_perms())
                return;

        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
                        pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                               perms[i].name, perms[i].start, perms[i].end,
                               SECTION_SIZE);
                        continue;
                }

                for (addr = perms[i].start;
                     addr < perms[i].end;
                     addr += SECTION_SIZE)
                        section_update(addr, perms[i].mask,
                                       set ? perms[i].prot : perms[i].clear, mm);
        }
}

static void update_sections_early(struct section_perm perms[], int n)
{
        struct task_struct *t, *s;

        read_lock(&tasklist_lock);
        for_each_process(t) {
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
                        set_section_perms(perms, n, true, s->mm);
        }
        read_unlock(&tasklist_lock);
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
}

int __fix_kernmem_perms(void *unused)
{
        update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
        return 0;
}

void fix_kernmem_perms(void)
{
        stop_machine(__fix_kernmem_perms, NULL, NULL);
}

int __mark_rodata_ro(void *unused)
{
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
        return 0;
}

void mark_rodata_ro(void)
{
        stop_machine(__mark_rodata_ro, NULL, NULL);
}

void set_kernel_text_rw(void)
{
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                          current->active_mm);
}

void set_kernel_text_ro(void)
{
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                          current->active_mm);
}
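
/*
 * Usage note (not a guarantee of every caller): code patchers such as
 * ftrace bracket their writes to kernel text with set_kernel_text_rw()
 * and set_kernel_text_ro(), so the text/rodata RO permissions in
 * ro_perms are lifted only for the duration of the patch, and only in
 * the mm that is current at the time.
 */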

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
}

void free_initmem(void)
{
        fix_kernmem_perms();
        free_tcmmem();

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                if (start == initrd_start)
                        start = round_down(start, PAGE_SIZE);
                if (end == initrd_end)
                        end = round_up(end, PAGE_SIZE);

                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                free_reserved_area((void *)start, (void *)end, -1, "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif