/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area specified by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the dump
 * capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of the hibernation image, all the pages are
	 * initially marked as Reserved.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
	     addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary
 * kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr overlaps reserved memory\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
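	/*
	 * Bits 63:32 of the DRAM base give the DMA offset: round the start
	 * of DRAM down to a 4 GB boundary, then cover the 4 GB above it,
	 * capped at the end of DRAM.
	 */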
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));
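	/*
	 * zhole_size starts out as "the whole zone is a hole"; each
	 * memblock region below then subtracts the pages it covers, so
	 * whatever remains is exactly the memory absent from each zone.
	 */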

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
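	/*
	 * Only pfns backed by memblock memory that is actually mapped in
	 * the linear region (i.e. not MEMBLOCK_NOMAP) count as valid.
	 */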
	return memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
				memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

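	/*
	 * A "linux,usable-memory-range" property is typically provided to
	 * a crash dump kernel; when present, cap memblock so that we never
	 * touch the crashed kernel's memory.
	 */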
	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if doing so would make
		 * the initrd inaccessible via the linear mapping. Otherwise
		 * this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			(memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
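			/*
			 * memstart_offset_seed is a 16-bit value, so treating
			 * it as the fraction seed/65536 of 'range' below picks
			 * a pseudo-random multiple of ARM64_MEMSTART_ALIGN by
			 * which to slide the linear region.
			 */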
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so it must
	 * be called after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

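	/*
	 * __va() is only meaningful for addresses covered by the linear
	 * map, so translate the last byte and step forward rather than
	 * translating the one-past-the-end address directly.
	 */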
	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
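	/*
	 * The pfn - 1 then + 1 dance guards against either pfn sitting one
	 * past a present SPARSEMEM section, where pfn_to_page() on the pfn
	 * itself would be invalid while pfn - 1 still lies inside one.
	 */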
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
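	/*
	 * swiotlb bounce buffers are only needed if some RAM lies beyond
	 * the reach of 32-bit DMA masks (or if the user forced them on);
	 * otherwise make sure they stay disabled.
	 */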
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);