Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Yinghai Lu95f72d12010-07-12 14:36:09 +10002/*
3 * Procedures for maintaining information about logical memory blocks.
4 *
5 * Peter Bergner, IBM Corp. June 2001.
6 * Copyright (C) 2001 Peter Bergner.
Yinghai Lu95f72d12010-07-12 14:36:09 +10007 */
8
9#include <linux/kernel.h>
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -070010#include <linux/slab.h>
Yinghai Lu95f72d12010-07-12 14:36:09 +100011#include <linux/init.h>
12#include <linux/bitops.h>
Benjamin Herrenschmidt449e8df2010-07-06 15:39:07 -070013#include <linux/poison.h>
Benjamin Herrenschmidtc196f762010-07-06 15:39:16 -070014#include <linux/pfn.h>
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -070015#include <linux/debugfs.h>
Randy Dunlap514c6032018-04-05 16:25:34 -070016#include <linux/kmemleak.h>
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -070017#include <linux/seq_file.h>
Yinghai Lu95f72d12010-07-12 14:36:09 +100018#include <linux/memblock.h>
19
Christoph Hellwigc4c5ad62016-07-28 15:48:06 -070020#include <asm/sections.h>
Santosh Shilimkar26f09e92014-01-21 15:50:19 -080021#include <linux/io.h>
22
23#include "internal.h"
Tang Chen79442ed2013-11-12 15:07:59 -080024
Ard Biesheuvel8a5b403d2019-02-15 13:33:32 +010025#define INIT_MEMBLOCK_REGIONS 128
26#define INIT_PHYSMEM_REGIONS 4
27
28#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
29# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
30#endif
31
Mike Rapoport3e039c52018-06-30 17:55:05 +030032/**
33 * DOC: memblock overview
34 *
35 * Memblock is a method of managing memory regions during the early
36 * boot period when the usual kernel memory allocators are not up and
37 * running.
38 *
39 * Memblock views the system memory as collections of contiguous
40 * regions. There are several types of these collections:
41 *
42 * * ``memory`` - describes the physical memory available to the
43 * kernel; this may differ from the actual physical memory installed
44 * in the system, for instance when the memory is restricted with
45 * ``mem=`` command line parameter
46 * * ``reserved`` - describes the regions that were allocated
47 * * ``physmap`` - describes the actual physical memory regardless of
48 * the possible restrictions; the ``physmap`` type is only available
49 * on some architectures.
50 *
51 * Each region is represented by :c:type:`struct memblock_region` that
52 * defines the region extents, its attributes and NUMA node id on NUMA
53 * systems. Every memory type is described by the :c:type:`struct
54 * memblock_type` which contains an array of memory regions along with
55 * the allocator metadata. The memory types are nicely wrapped with
 56 * :c:type:`struct memblock`. This structure is statically initialized
57 * at build time. The region arrays for the "memory" and "reserved"
58 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
59 * "physmap" type to %INIT_PHYSMEM_REGIONS.
60 * The :c:func:`memblock_allow_resize` enables automatic resizing of
61 * the region arrays during addition of new regions. This feature
62 * should be used with care so that memory allocated for the region
63 * array will not overlap with areas that should be reserved, for
64 * example initrd.
65 *
66 * The early architecture setup should tell memblock what the physical
67 * memory layout is by using :c:func:`memblock_add` or
68 * :c:func:`memblock_add_node` functions. The first function does not
69 * assign the region to a NUMA node and it is appropriate for UMA
70 * systems. Yet, it is possible to use it on NUMA systems as well and
71 * assign the region to a NUMA node later in the setup process using
72 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
73 * performs such an assignment directly.
74 *
Mike Rapoporta2974132019-03-11 23:30:54 -070075 * Once memblock is set up, the memory can be allocated using one of the
76 * API variants:
77 *
78 * * :c:func:`memblock_phys_alloc*` - these functions return the
79 * **physical** address of the allocated memory
80 * * :c:func:`memblock_alloc*` - these functions return the **virtual**
81 * address of the allocated memory.
82 *
 83 * Note that both API variants use implicit assumptions about allowed
 84 * memory ranges and the fallback methods. Consult the documentation
 85 * of :c:func:`memblock_alloc_internal` and
 86 * :c:func:`memblock_alloc_range_nid` functions for a more elaborate
 87 * description.
Mike Rapoport3e039c52018-06-30 17:55:05 +030088 *
89 * As the system boot progresses, the architecture specific
90 * :c:func:`mem_init` function frees all the memory to the buddy page
91 * allocator.
92 *
Mike Rapoport350e88b2019-05-13 17:22:59 -070093 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
Mike Rapoport3e039c52018-06-30 17:55:05 +030094 * memblock data structures will be discarded after the system
 95 * initialization completes.
96 */
97
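/*
 * Illustrative sketch (not part of this file): how early architecture setup
 * code typically feeds memblock and then allocates from it, as described in
 * the overview above. The addresses and sizes are made up, and the
 * availability of memblock_alloc()/memblock_phys_alloc() in this exact form
 * is assumed for this kernel version:
 *
 *	memblock_add(0x80000000, SZ_512M);		// register a RAM bank
 *	memblock_set_node(0x80000000, SZ_512M, &memblock.memory, 0);
 *	memblock_reserve(initrd_start, initrd_size);	// keep the initrd safe
 *
 *	void *table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	// virtual
 *	phys_addr_t pa = memblock_phys_alloc(SZ_1M, SZ_2M);	// physical
 */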
Mike Rapoportbda49a82018-10-30 15:09:40 -070098#ifndef CONFIG_NEED_MULTIPLE_NODES
99struct pglist_data __refdata contig_page_data;
100EXPORT_SYMBOL(contig_page_data);
101#endif
102
103unsigned long max_low_pfn;
104unsigned long min_low_pfn;
105unsigned long max_pfn;
106unsigned long long max_possible_pfn;
107
Tejun Heofe091c22011-12-08 10:22:07 -0800108static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
Ard Biesheuvel8a5b403d2019-02-15 13:33:32 +0100109static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
Philipp Hachtmann70210ed2014-01-29 18:16:01 +0100110#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
111static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
112#endif
Tejun Heofe091c22011-12-08 10:22:07 -0800113
114struct memblock memblock __initdata_memblock = {
115 .memory.regions = memblock_memory_init_regions,
116 .memory.cnt = 1, /* empty dummy entry */
117 .memory.max = INIT_MEMBLOCK_REGIONS,
Heiko Carstens0262d9c2017-02-24 14:55:59 -0800118 .memory.name = "memory",
Tejun Heofe091c22011-12-08 10:22:07 -0800119
120 .reserved.regions = memblock_reserved_init_regions,
121 .reserved.cnt = 1, /* empty dummy entry */
Ard Biesheuvel8a5b403d2019-02-15 13:33:32 +0100122 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
Heiko Carstens0262d9c2017-02-24 14:55:59 -0800123 .reserved.name = "reserved",
Tejun Heofe091c22011-12-08 10:22:07 -0800124
Philipp Hachtmann70210ed2014-01-29 18:16:01 +0100125#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
126 .physmem.regions = memblock_physmem_init_regions,
127 .physmem.cnt = 1, /* empty dummy entry */
128 .physmem.max = INIT_PHYSMEM_REGIONS,
Heiko Carstens0262d9c2017-02-24 14:55:59 -0800129 .physmem.name = "physmem",
Philipp Hachtmann70210ed2014-01-29 18:16:01 +0100130#endif
131
Tang Chen79442ed2013-11-12 15:07:59 -0800132 .bottom_up = false,
Tejun Heofe091c22011-12-08 10:22:07 -0800133 .current_limit = MEMBLOCK_ALLOC_ANYWHERE,
134};
Yinghai Lu95f72d12010-07-12 14:36:09 +1000135
Yinghai Lu10d06432010-07-28 15:43:02 +1000136int memblock_debug __initdata_memblock;
Tony Lucka3f5baf2015-06-24 16:58:12 -0700137static bool system_has_some_mirror __initdata_memblock = false;
Tejun Heo1aadc052011-12-08 10:22:08 -0800138static int memblock_can_resize __initdata_memblock;
Gavin Shan181eb392012-05-29 15:06:50 -0700139static int memblock_memory_in_slab __initdata_memblock = 0;
140static int memblock_reserved_in_slab __initdata_memblock = 0;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000141
Mike Rapoportc366ea82019-03-11 23:29:46 -0700142static enum memblock_flags __init_memblock choose_memblock_flags(void)
Tony Lucka3f5baf2015-06-24 16:58:12 -0700143{
144 return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
145}
146
Tejun Heoeb18f1b2011-12-08 10:22:07 -0800147/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
148static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
149{
Stefan Agner1c4bc432018-06-07 17:06:15 -0700150 return *size = min(*size, PHYS_ADDR_MAX - base);
Tejun Heoeb18f1b2011-12-08 10:22:07 -0800151}
152
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +1000153/*
154 * Address comparison utilities
155 */
Yinghai Lu10d06432010-07-28 15:43:02 +1000156static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
Benjamin Herrenschmidt2898cc42010-08-04 13:34:42 +1000157 phys_addr_t base2, phys_addr_t size2)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000158{
159 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
160}
161
Tang Chen95cf82e2015-09-08 15:02:03 -0700162bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
H Hartley Sweeten2d7d3eb2011-10-31 17:09:15 -0700163 phys_addr_t base, phys_addr_t size)
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +1000164{
165 unsigned long i;
166
Alexander Kuleshovf14516f2016-01-14 15:20:39 -0800167 for (i = 0; i < type->cnt; i++)
168 if (memblock_addrs_overlap(base, size, type->regions[i].base,
169 type->regions[i].size))
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +1000170 break;
Tang Chenc5c5c9d2015-09-08 15:02:00 -0700171 return i < type->cnt;
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +1000172}
173
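/*
 * Worked example (illustrative only): memblock_addrs_overlap() treats
 * regions as half-open intervals [base, base + size). For instance,
 * [0x1000, 0x2000) and [0x1800, 0x2800) overlap, while [0x1000, 0x2000)
 * and [0x2000, 0x3000) merely touch and are not considered overlapping:
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000);	// true
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000);	// false
 */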
Mike Rapoport47cec442018-06-30 17:55:02 +0300174/**
Tang Chen79442ed2013-11-12 15:07:59 -0800175 * __memblock_find_range_bottom_up - find free area utility in bottom-up
176 * @start: start of candidate range
Mike Rapoport47cec442018-06-30 17:55:02 +0300177 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
178 * %MEMBLOCK_ALLOC_ACCESSIBLE
Tang Chen79442ed2013-11-12 15:07:59 -0800179 * @size: size of free area to find
180 * @align: alignment of free area to find
Grygorii Strashkob1154232014-01-21 15:50:16 -0800181 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
Tony Luckfc6daaf2015-06-24 16:58:09 -0700182 * @flags: pick from blocks based on memory attributes
Tang Chen79442ed2013-11-12 15:07:59 -0800183 *
184 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
185 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300186 * Return:
Tang Chen79442ed2013-11-12 15:07:59 -0800187 * Found address on success, 0 on failure.
188 */
189static phys_addr_t __init_memblock
190__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
Tony Luckfc6daaf2015-06-24 16:58:09 -0700191 phys_addr_t size, phys_addr_t align, int nid,
Mike Rapoporte1720fe2018-06-30 17:55:01 +0300192 enum memblock_flags flags)
Tang Chen79442ed2013-11-12 15:07:59 -0800193{
194 phys_addr_t this_start, this_end, cand;
195 u64 i;
196
Tony Luckfc6daaf2015-06-24 16:58:09 -0700197 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
Tang Chen79442ed2013-11-12 15:07:59 -0800198 this_start = clamp(this_start, start, end);
199 this_end = clamp(this_end, start, end);
200
201 cand = round_up(this_start, align);
202 if (cand < this_end && this_end - cand >= size)
203 return cand;
204 }
205
206 return 0;
207}
208
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800209/**
Tang Chen14028992013-11-12 15:07:57 -0800210 * __memblock_find_range_top_down - find free area utility, in top-down
211 * @start: start of candidate range
Mike Rapoport47cec442018-06-30 17:55:02 +0300212 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
213 * %MEMBLOCK_ALLOC_ACCESSIBLE
Tang Chen14028992013-11-12 15:07:57 -0800214 * @size: size of free area to find
215 * @align: alignment of free area to find
Grygorii Strashkob1154232014-01-21 15:50:16 -0800216 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
Tony Luckfc6daaf2015-06-24 16:58:09 -0700217 * @flags: pick from blocks based on memory attributes
Tang Chen14028992013-11-12 15:07:57 -0800218 *
219 * Utility called from memblock_find_in_range_node(), find free area top-down.
220 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300221 * Return:
Tang Chen79442ed2013-11-12 15:07:59 -0800222 * Found address on success, 0 on failure.
Tang Chen14028992013-11-12 15:07:57 -0800223 */
224static phys_addr_t __init_memblock
225__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
Tony Luckfc6daaf2015-06-24 16:58:09 -0700226 phys_addr_t size, phys_addr_t align, int nid,
Mike Rapoporte1720fe2018-06-30 17:55:01 +0300227 enum memblock_flags flags)
Tang Chen14028992013-11-12 15:07:57 -0800228{
229 phys_addr_t this_start, this_end, cand;
230 u64 i;
231
Tony Luckfc6daaf2015-06-24 16:58:09 -0700232 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
233 NULL) {
Tang Chen14028992013-11-12 15:07:57 -0800234 this_start = clamp(this_start, start, end);
235 this_end = clamp(this_end, start, end);
236
237 if (this_end < size)
238 continue;
239
240 cand = round_down(this_end - size, align);
241 if (cand >= this_start)
242 return cand;
243 }
244
245 return 0;
246}
247
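/*
 * Worked example (illustrative only): with a single free range
 * [0x1100, 0x3000), a request for size = 0x800 aligned to 0x1000 is
 * satisfied as follows by the two search directions above:
 *
 *	bottom-up:	cand = round_up(0x1100, 0x1000) = 0x2000;
 *			0x3000 - 0x2000 >= 0x800, so 0x2000 is returned.
 *	top-down:	cand = round_down(0x3000 - 0x800, 0x1000) = 0x2000;
 *			cand >= 0x1100, so 0x2000 is returned as well.
 *
 * With a tighter range the two directions can pick different addresses,
 * which is why memblock_find_in_range_node() below only tries bottom-up
 * first when that mode is explicitly enabled.
 */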
248/**
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800249 * memblock_find_in_range_node - find free area in given range and node
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800250 * @size: size of free area to find
251 * @align: alignment of free area to find
Grygorii Strashko87029ee2014-01-21 15:50:14 -0800252 * @start: start of candidate range
Mike Rapoport47cec442018-06-30 17:55:02 +0300253 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
254 * %MEMBLOCK_ALLOC_ACCESSIBLE
Grygorii Strashkob1154232014-01-21 15:50:16 -0800255 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
Tony Luckfc6daaf2015-06-24 16:58:09 -0700256 * @flags: pick from blocks based on memory attributes
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800257 *
258 * Find @size free area aligned to @align in the specified range and node.
259 *
Tang Chen79442ed2013-11-12 15:07:59 -0800260 * When allocation direction is bottom-up, the @start should be greater
 261 * than the end of the kernel image. Otherwise, it will be trimmed. The
 262 * reason is that we want the bottom-up allocation to stay close to the
 263 * kernel image, so it is highly likely that the allocated memory and
 264 * the kernel will reside in the same node.
265 *
 266 * If bottom-up allocation fails, memory is allocated top-down.
267 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300268 * Return:
Tang Chen79442ed2013-11-12 15:07:59 -0800269 * Found address on success, 0 on failure.
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +1000270 */
Mike Rapoportc366ea82019-03-11 23:29:46 -0700271static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
Grygorii Strashko87029ee2014-01-21 15:50:14 -0800272 phys_addr_t align, phys_addr_t start,
Mike Rapoporte1720fe2018-06-30 17:55:01 +0300273 phys_addr_t end, int nid,
274 enum memblock_flags flags)
Tang Chenf7210e62013-02-22 16:33:51 -0800275{
Tang Chen0cfb8f02014-08-29 15:18:31 -0700276 phys_addr_t kernel_end, ret;
Tang Chen79442ed2013-11-12 15:07:59 -0800277
Tang Chenf7210e62013-02-22 16:33:51 -0800278 /* pump up @end */
Qian Caifed84c72018-12-28 00:36:29 -0800279 if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
280 end == MEMBLOCK_ALLOC_KASAN)
Tang Chenf7210e62013-02-22 16:33:51 -0800281 end = memblock.current_limit;
282
283 /* avoid allocating the first page */
284 start = max_t(phys_addr_t, start, PAGE_SIZE);
285 end = max(start, end);
Tang Chen79442ed2013-11-12 15:07:59 -0800286 kernel_end = __pa_symbol(_end);
287
288 /*
289 * try bottom-up allocation only when bottom-up mode
290 * is set and @end is above the kernel image.
291 */
292 if (memblock_bottom_up() && end > kernel_end) {
293 phys_addr_t bottom_up_start;
294
295 /* make sure we will allocate above the kernel */
296 bottom_up_start = max(start, kernel_end);
297
298 /* ok, try bottom-up allocation first */
299 ret = __memblock_find_range_bottom_up(bottom_up_start, end,
Tony Luckfc6daaf2015-06-24 16:58:09 -0700300 size, align, nid, flags);
Tang Chen79442ed2013-11-12 15:07:59 -0800301 if (ret)
302 return ret;
303
304 /*
305 * we always limit bottom-up allocation above the kernel,
306 * but top-down allocation doesn't have the limit, so
307 * retrying top-down allocation may succeed when bottom-up
308 * allocation failed.
309 *
 310 * bottom-up allocation is expected to fail very rarely,
311 * so we use WARN_ONCE() here to see the stack trace if
312 * fail happens.
313 */
Michal Hockoe3d301c2018-07-13 16:59:16 -0700314 WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
315 "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
Tang Chen79442ed2013-11-12 15:07:59 -0800316 }
Tang Chenf7210e62013-02-22 16:33:51 -0800317
Tony Luckfc6daaf2015-06-24 16:58:09 -0700318 return __memblock_find_range_top_down(start, end, size, align, nid,
319 flags);
Tang Chenf7210e62013-02-22 16:33:51 -0800320}
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +1000321
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800322/**
323 * memblock_find_in_range - find free area in given range
324 * @start: start of candidate range
Mike Rapoport47cec442018-06-30 17:55:02 +0300325 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
326 * %MEMBLOCK_ALLOC_ACCESSIBLE
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800327 * @size: size of free area to find
328 * @align: alignment of free area to find
329 *
330 * Find @size free area aligned to @align in the specified range.
331 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300332 * Return:
Tang Chen79442ed2013-11-12 15:07:59 -0800333 * Found address on success, 0 on failure.
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800334 */
335phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
336 phys_addr_t end, phys_addr_t size,
337 phys_addr_t align)
338{
Tony Lucka3f5baf2015-06-24 16:58:12 -0700339 phys_addr_t ret;
Mike Rapoporte1720fe2018-06-30 17:55:01 +0300340 enum memblock_flags flags = choose_memblock_flags();
Tony Lucka3f5baf2015-06-24 16:58:12 -0700341
342again:
343 ret = memblock_find_in_range_node(size, align, start, end,
344 NUMA_NO_NODE, flags);
345
346 if (!ret && (flags & MEMBLOCK_MIRROR)) {
347 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
348 &size);
349 flags &= ~MEMBLOCK_MIRROR;
350 goto again;
351 }
352
353 return ret;
Tejun Heo7bd0b0f2011-12-08 10:22:09 -0800354}
355
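/*
 * Illustrative sketch (not part of this file): a typical early-boot caller
 * pairs memblock_find_in_range() with memblock_reserve(), since finding a
 * range does not claim it. The size and limits here are made up:
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, SZ_4G, SZ_64K, PAGE_SIZE);
 *	if (addr)
 *		memblock_reserve(addr, SZ_64K);
 */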
Yinghai Lu10d06432010-07-28 15:43:02 +1000356static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000357{
Tejun Heo1440c4e2011-12-08 10:22:08 -0800358 type->total_size -= type->regions[r].size;
Tejun Heo7c0caeb2011-07-14 11:43:42 +0200359 memmove(&type->regions[r], &type->regions[r + 1],
360 (type->cnt - (r + 1)) * sizeof(type->regions[r]));
Benjamin Herrenschmidte3239ff2010-08-04 14:06:41 +1000361 type->cnt--;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000362
Benjamin Herrenschmidt8f7a6602011-03-22 16:33:43 -0700363 /* Special case for empty arrays */
364 if (type->cnt == 0) {
Tejun Heo1440c4e2011-12-08 10:22:08 -0800365 WARN_ON(type->total_size != 0);
Benjamin Herrenschmidt8f7a6602011-03-22 16:33:43 -0700366 type->cnt = 1;
367 type->regions[0].base = 0;
368 type->regions[0].size = 0;
Tang Chen66a20752014-01-21 15:49:20 -0800369 type->regions[0].flags = 0;
Tejun Heo7c0caeb2011-07-14 11:43:42 +0200370 memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
Benjamin Herrenschmidt8f7a6602011-03-22 16:33:43 -0700371 }
Yinghai Lu95f72d12010-07-12 14:36:09 +1000372}
373
Mike Rapoport350e88b2019-05-13 17:22:59 -0700374#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
Pavel Tatashin3010f872017-08-18 15:16:05 -0700375/**
Mike Rapoport47cec442018-06-30 17:55:02 +0300376 * memblock_discard - discard memory and reserved arrays if they were allocated
Pavel Tatashin3010f872017-08-18 15:16:05 -0700377 */
378void __init memblock_discard(void)
Yinghai Lu29f67382012-07-11 14:02:56 -0700379{
Pavel Tatashin3010f872017-08-18 15:16:05 -0700380 phys_addr_t addr, size;
Yinghai Lu29f67382012-07-11 14:02:56 -0700381
Pavel Tatashin3010f872017-08-18 15:16:05 -0700382 if (memblock.reserved.regions != memblock_reserved_init_regions) {
383 addr = __pa(memblock.reserved.regions);
384 size = PAGE_ALIGN(sizeof(struct memblock_region) *
385 memblock.reserved.max);
386 __memblock_free_late(addr, size);
387 }
Yinghai Lu29f67382012-07-11 14:02:56 -0700388
Pavel Tatashin91b540f2017-08-25 15:55:46 -0700389 if (memblock.memory.regions != memblock_memory_init_regions) {
Pavel Tatashin3010f872017-08-18 15:16:05 -0700390 addr = __pa(memblock.memory.regions);
391 size = PAGE_ALIGN(sizeof(struct memblock_region) *
392 memblock.memory.max);
393 __memblock_free_late(addr, size);
394 }
Yinghai Lu29f67382012-07-11 14:02:56 -0700395}
Philipp Hachtmann5e270e22014-01-23 15:53:11 -0800396#endif
397
Greg Pearson48c3b582012-06-20 12:53:05 -0700398/**
399 * memblock_double_array - double the size of the memblock regions array
400 * @type: memblock type of the regions array being doubled
401 * @new_area_start: starting address of memory range to avoid overlap with
402 * @new_area_size: size of memory range to avoid overlap with
403 *
404 * Double the size of the @type regions array. If memblock is being used to
405 * allocate memory for a new reserved regions array and there is a previously
Mike Rapoport47cec442018-06-30 17:55:02 +0300406 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
Greg Pearson48c3b582012-06-20 12:53:05 -0700407 * waiting to be reserved, ensure the memory used by the new array does
408 * not overlap.
409 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300410 * Return:
Greg Pearson48c3b582012-06-20 12:53:05 -0700411 * 0 on success, -1 on failure.
412 */
413static int __init_memblock memblock_double_array(struct memblock_type *type,
414 phys_addr_t new_area_start,
415 phys_addr_t new_area_size)
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700416{
417 struct memblock_region *new_array, *old_array;
Yinghai Lu29f67382012-07-11 14:02:56 -0700418 phys_addr_t old_alloc_size, new_alloc_size;
Mike Rapoporta36aab82018-08-17 15:47:17 -0700419 phys_addr_t old_size, new_size, addr, new_end;
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700420 int use_slab = slab_is_available();
Gavin Shan181eb392012-05-29 15:06:50 -0700421 int *in_slab;
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700422
423 /* We don't allow resizing until we know about the reserved regions
424 * of memory that aren't suitable for allocation
425 */
426 if (!memblock_can_resize)
427 return -1;
428
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700429 /* Calculate new doubled size */
430 old_size = type->max * sizeof(struct memblock_region);
431 new_size = old_size << 1;
Yinghai Lu29f67382012-07-11 14:02:56 -0700432 /*
 433 * We need to allocate the new one aligned to PAGE_SIZE,
 434 * so we can free it completely later.
435 */
436 old_alloc_size = PAGE_ALIGN(old_size);
437 new_alloc_size = PAGE_ALIGN(new_size);
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700438
Gavin Shan181eb392012-05-29 15:06:50 -0700439 /* Retrieve the slab flag */
440 if (type == &memblock.memory)
441 in_slab = &memblock_memory_in_slab;
442 else
443 in_slab = &memblock_reserved_in_slab;
444
Mike Rapoporta2974132019-03-11 23:30:54 -0700445 /* Try to find some space for it */
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700446 if (use_slab) {
447 new_array = kmalloc(new_size, GFP_KERNEL);
Tejun Heo1f5026a2011-07-12 09:58:09 +0200448 addr = new_array ? __pa(new_array) : 0;
Gavin Shan4e2f0772012-05-29 15:06:50 -0700449 } else {
Greg Pearson48c3b582012-06-20 12:53:05 -0700450 /* only exclude range when trying to double reserved.regions */
451 if (type != &memblock.reserved)
452 new_area_start = new_area_size = 0;
453
454 addr = memblock_find_in_range(new_area_start + new_area_size,
455 memblock.current_limit,
Yinghai Lu29f67382012-07-11 14:02:56 -0700456 new_alloc_size, PAGE_SIZE);
Greg Pearson48c3b582012-06-20 12:53:05 -0700457 if (!addr && new_area_size)
458 addr = memblock_find_in_range(0,
Andrew Mortonfd073832012-07-31 16:42:40 -0700459 min(new_area_start, memblock.current_limit),
460 new_alloc_size, PAGE_SIZE);
Greg Pearson48c3b582012-06-20 12:53:05 -0700461
Sachin Kamat15674862012-09-04 13:55:05 +0530462 new_array = addr ? __va(addr) : NULL;
Gavin Shan4e2f0772012-05-29 15:06:50 -0700463 }
Tejun Heo1f5026a2011-07-12 09:58:09 +0200464 if (!addr) {
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700465 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
Heiko Carstens0262d9c2017-02-24 14:55:59 -0800466 type->name, type->max, type->max * 2);
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700467 return -1;
468 }
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700469
Mike Rapoporta36aab82018-08-17 15:47:17 -0700470 new_end = addr + new_size - 1;
471 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
472 type->name, type->max * 2, &addr, &new_end);
Yinghai Luea9e4372010-07-28 15:13:22 +1000473
Andrew Mortonfd073832012-07-31 16:42:40 -0700474 /*
475 * Found space, we now need to move the array over before we add the
476 * reserved region since it may be our reserved array itself that is
477 * full.
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700478 */
479 memcpy(new_array, type->regions, old_size);
480 memset(new_array + type->max, 0, old_size);
481 old_array = type->regions;
482 type->regions = new_array;
483 type->max <<= 1;
484
Andrew Mortonfd073832012-07-31 16:42:40 -0700485 /* Free old array. We needn't free it if the array is the static one */
Gavin Shan181eb392012-05-29 15:06:50 -0700486 if (*in_slab)
487 kfree(old_array);
488 else if (old_array != memblock_memory_init_regions &&
489 old_array != memblock_reserved_init_regions)
Yinghai Lu29f67382012-07-11 14:02:56 -0700490 memblock_free(__pa(old_array), old_alloc_size);
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700491
Andrew Mortonfd073832012-07-31 16:42:40 -0700492 /*
 493 * Reserve the new array if it was allocated from memblock. Otherwise,
 494 * we needn't do it.
Gavin Shan181eb392012-05-29 15:06:50 -0700495 */
496 if (!use_slab)
Yinghai Lu29f67382012-07-11 14:02:56 -0700497 BUG_ON(memblock_reserve(addr, new_alloc_size));
Gavin Shan181eb392012-05-29 15:06:50 -0700498
499 /* Update slab flag */
500 *in_slab = use_slab;
501
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700502 return 0;
503}
504
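/*
 * Worked example (illustrative only): starting from INIT_MEMBLOCK_REGIONS
 * (128) entries, the first resize above doubles the array to 256 entries
 * and rounds the allocation up to whole pages so the old array can later
 * be freed on page granularity:
 *
 *	old_size = 128 * sizeof(struct memblock_region);
 *	new_size = old_size << 1;			// 256 entries
 *	new_alloc_size = PAGE_ALIGN(new_size);		// page granular
 */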
Tejun Heo784656f92011-07-12 11:15:55 +0200505/**
506 * memblock_merge_regions - merge neighboring compatible regions
507 * @type: memblock type to scan
508 *
509 * Scan @type and merge neighboring compatible regions.
510 */
511static void __init_memblock memblock_merge_regions(struct memblock_type *type)
512{
513 int i = 0;
514
515 /* cnt never goes below 1 */
516 while (i < type->cnt - 1) {
517 struct memblock_region *this = &type->regions[i];
518 struct memblock_region *next = &type->regions[i + 1];
519
Tejun Heo7c0caeb2011-07-14 11:43:42 +0200520 if (this->base + this->size != next->base ||
521 memblock_get_region_node(this) !=
Tang Chen66a20752014-01-21 15:49:20 -0800522 memblock_get_region_node(next) ||
523 this->flags != next->flags) {
Tejun Heo784656f92011-07-12 11:15:55 +0200524 BUG_ON(this->base + this->size > next->base);
525 i++;
526 continue;
527 }
528
529 this->size += next->size;
Lin Fengc0232ae2013-01-11 14:31:44 -0800530 /* move forward from next + 1, whose index is i + 2 */
531 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
Tejun Heo784656f92011-07-12 11:15:55 +0200532 type->cnt--;
533 }
534}
535
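/*
 * Worked example (illustrative only): after adding [0x1000, 0x2000) and
 * [0x2000, 0x3000) with the same node id and flags, the merge pass above
 * collapses them into a single region [0x1000, 0x3000). Adjacent regions
 * that differ in nid or flags are kept separate:
 *
 *	memblock_add_node(0x1000, 0x1000, 0);
 *	memblock_add_node(0x2000, 0x1000, 0);
 *	// memblock.memory now holds one region: base 0x1000, size 0x2000
 */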
536/**
537 * memblock_insert_region - insert new memblock region
Tang Chen209ff862013-04-29 15:08:41 -0700538 * @type: memblock type to insert into
539 * @idx: index for the insertion point
540 * @base: base address of the new region
541 * @size: size of the new region
542 * @nid: node id of the new region
Tang Chen66a20752014-01-21 15:49:20 -0800543 * @flags: flags of the new region
Tejun Heo784656f92011-07-12 11:15:55 +0200544 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300545 * Insert new memblock region [@base, @base + @size) into @type at @idx.
Alexander Kuleshov412d0002016-08-04 15:31:46 -0700546 * @type must already have extra room to accommodate the new region.
Tejun Heo784656f92011-07-12 11:15:55 +0200547 */
548static void __init_memblock memblock_insert_region(struct memblock_type *type,
549 int idx, phys_addr_t base,
Tang Chen66a20752014-01-21 15:49:20 -0800550 phys_addr_t size,
Mike Rapoporte1720fe2018-06-30 17:55:01 +0300551 int nid,
552 enum memblock_flags flags)
Tejun Heo784656f92011-07-12 11:15:55 +0200553{
554 struct memblock_region *rgn = &type->regions[idx];
555
556 BUG_ON(type->cnt >= type->max);
557 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
558 rgn->base = base;
559 rgn->size = size;
Tang Chen66a20752014-01-21 15:49:20 -0800560 rgn->flags = flags;
Tejun Heo7c0caeb2011-07-14 11:43:42 +0200561 memblock_set_region_node(rgn, nid);
Tejun Heo784656f92011-07-12 11:15:55 +0200562 type->cnt++;
Tejun Heo1440c4e2011-12-08 10:22:08 -0800563 type->total_size += size;
Tejun Heo784656f92011-07-12 11:15:55 +0200564}
565
566/**
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100567 * memblock_add_range - add new memblock region
Tejun Heo784656f92011-07-12 11:15:55 +0200568 * @type: memblock type to add new region into
569 * @base: base address of the new region
570 * @size: size of the new region
Tejun Heo7fb0bc32011-12-08 10:22:08 -0800571 * @nid: nid of the new region
Tang Chen66a20752014-01-21 15:49:20 -0800572 * @flags: flags of the new region
Tejun Heo784656f92011-07-12 11:15:55 +0200573 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300574 * Add new memblock region [@base, @base + @size) into @type. The new region
Tejun Heo784656f92011-07-12 11:15:55 +0200575 * is allowed to overlap with existing ones - overlaps don't affect already
576 * existing regions. @type is guaranteed to be minimal (all neighbouring
577 * compatible regions are merged) after the addition.
578 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300579 * Return:
Tejun Heo784656f92011-07-12 11:15:55 +0200580 * 0 on success, -errno on failure.
581 */
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100582int __init_memblock memblock_add_range(struct memblock_type *type,
Tang Chen66a20752014-01-21 15:49:20 -0800583 phys_addr_t base, phys_addr_t size,
Mike Rapoporte1720fe2018-06-30 17:55:01 +0300584 int nid, enum memblock_flags flags)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000585{
Tejun Heo784656f92011-07-12 11:15:55 +0200586 bool insert = false;
Tejun Heoeb18f1b2011-12-08 10:22:07 -0800587 phys_addr_t obase = base;
588 phys_addr_t end = base + memblock_cap_size(base, &size);
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800589 int idx, nr_new;
590 struct memblock_region *rgn;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000591
Tejun Heob3dc6272012-04-20 08:31:34 -0700592 if (!size)
593 return 0;
594
Tejun Heo784656f92011-07-12 11:15:55 +0200595 /* special case for empty array */
596 if (type->regions[0].size == 0) {
Tejun Heo1440c4e2011-12-08 10:22:08 -0800597 WARN_ON(type->cnt != 1 || type->total_size);
Benjamin Herrenschmidte3239ff2010-08-04 14:06:41 +1000598 type->regions[0].base = base;
599 type->regions[0].size = size;
Tang Chen66a20752014-01-21 15:49:20 -0800600 type->regions[0].flags = flags;
Tejun Heo7fb0bc32011-12-08 10:22:08 -0800601 memblock_set_region_node(&type->regions[0], nid);
Tejun Heo1440c4e2011-12-08 10:22:08 -0800602 type->total_size = size;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000603 return 0;
604 }
Tejun Heo784656f92011-07-12 11:15:55 +0200605repeat:
606 /*
607 * The following is executed twice. Once with %false @insert and
608 * then with %true. The first counts the number of regions needed
Alexander Kuleshov412d0002016-08-04 15:31:46 -0700609 * to accommodate the new area. The second actually inserts them.
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700610 */
Tejun Heo784656f92011-07-12 11:15:55 +0200611 base = obase;
612 nr_new = 0;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000613
Gioh Kim66e8b432017-11-15 17:33:42 -0800614 for_each_memblock_type(idx, type, rgn) {
Tejun Heo784656f92011-07-12 11:15:55 +0200615 phys_addr_t rbase = rgn->base;
616 phys_addr_t rend = rbase + rgn->size;
617
618 if (rbase >= end)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000619 break;
Tejun Heo784656f92011-07-12 11:15:55 +0200620 if (rend <= base)
621 continue;
622 /*
623 * @rgn overlaps. If it separates the lower part of new
624 * area, insert that portion.
625 */
626 if (rbase > base) {
Wei Yangc0a29492015-09-04 15:47:38 -0700627#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
628 WARN_ON(nid != memblock_get_region_node(rgn));
629#endif
Wei Yang4fcab5f2015-09-08 14:59:53 -0700630 WARN_ON(flags != rgn->flags);
Tejun Heo784656f92011-07-12 11:15:55 +0200631 nr_new++;
632 if (insert)
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800633 memblock_insert_region(type, idx++, base,
Tang Chen66a20752014-01-21 15:49:20 -0800634 rbase - base, nid,
635 flags);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000636 }
Tejun Heo784656f92011-07-12 11:15:55 +0200637 /* area below @rend is dealt with, forget about it */
638 base = min(rend, end);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000639 }
Yinghai Lu95f72d12010-07-12 14:36:09 +1000640
Tejun Heo784656f92011-07-12 11:15:55 +0200641 /* insert the remaining portion */
642 if (base < end) {
643 nr_new++;
644 if (insert)
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800645 memblock_insert_region(type, idx, base, end - base,
Tang Chen66a20752014-01-21 15:49:20 -0800646 nid, flags);
Tejun Heo784656f92011-07-12 11:15:55 +0200647 }
648
nimisoloef3cc4d2016-07-26 15:24:56 -0700649 if (!nr_new)
650 return 0;
651
Tejun Heo784656f92011-07-12 11:15:55 +0200652 /*
653 * If this was the first round, resize array and repeat for actual
654 * insertions; otherwise, merge and return.
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700655 */
Tejun Heo784656f92011-07-12 11:15:55 +0200656 if (!insert) {
657 while (type->cnt + nr_new > type->max)
Greg Pearson48c3b582012-06-20 12:53:05 -0700658 if (memblock_double_array(type, obase, size) < 0)
Tejun Heo784656f92011-07-12 11:15:55 +0200659 return -ENOMEM;
660 insert = true;
661 goto repeat;
662 } else {
663 memblock_merge_regions(type);
664 return 0;
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -0700665 }
Yinghai Lu95f72d12010-07-12 14:36:09 +1000666}
667
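/*
 * Worked example (illustrative only): adding [0x1000, 0x4000) while
 * [0x2000, 0x3000) is already present inserts only the uncovered pieces.
 * The first (counting) pass finds that two new regions are needed, the
 * second pass inserts them, and the final merge produces one region:
 *
 *	memblock_add(0x2000, 0x1000);	// existing region
 *	memblock_add(0x1000, 0x3000);	// overlapping add
 *	// result: a single merged region [0x1000, 0x4000)
 */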
Mike Rapoport48a833c2018-06-30 17:55:03 +0300668/**
669 * memblock_add_node - add new memblock region within a NUMA node
670 * @base: base address of the new region
671 * @size: size of the new region
672 * @nid: nid of the new region
673 *
674 * Add new memblock region [@base, @base + @size) to the "memory"
 675 * type. See memblock_add_range() description for more details.
676 *
677 * Return:
678 * 0 on success, -errno on failure.
679 */
Tejun Heo7fb0bc32011-12-08 10:22:08 -0800680int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
681 int nid)
682{
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100683 return memblock_add_range(&memblock.memory, base, size, nid, 0);
Tejun Heo7fb0bc32011-12-08 10:22:08 -0800684}
685
Mike Rapoport48a833c2018-06-30 17:55:03 +0300686/**
687 * memblock_add - add new memblock region
688 * @base: base address of the new region
689 * @size: size of the new region
690 *
691 * Add new memblock region [@base, @base + @size) to the "memory"
 692 * type. See memblock_add_range() description for more details.
693 *
694 * Return:
695 * 0 on success, -errno on failure.
696 */
Alexander Kuleshovf705ac42016-05-20 16:57:35 -0700697int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
Alexander Kuleshov6a4055b2015-04-15 16:14:44 -0700698{
Miles Chen5d63f812017-02-22 15:46:42 -0800699 phys_addr_t end = base + size - 1;
700
Sakari Ailusd75f7732019-03-25 21:32:28 +0200701 memblock_dbg("memblock_add: [%pa-%pa] %pS\n",
Miles Chen5d63f812017-02-22 15:46:42 -0800702 &base, &end, (void *)_RET_IP_);
Alexander Kuleshov6a4055b2015-04-15 16:14:44 -0700703
Alexander Kuleshovf705ac42016-05-20 16:57:35 -0700704 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000705}
706
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800707/**
708 * memblock_isolate_range - isolate given range into disjoint memblocks
709 * @type: memblock type to isolate range for
710 * @base: base of range to isolate
711 * @size: size of range to isolate
712 * @start_rgn: out parameter for the start of isolated region
713 * @end_rgn: out parameter for the end of isolated region
714 *
715 * Walk @type and ensure that regions don't cross the boundaries defined by
Mike Rapoport47cec442018-06-30 17:55:02 +0300716 * [@base, @base + @size). Crossing regions are split at the boundaries,
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800717 * which may create at most two more regions. The index of the first
718 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
719 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300720 * Return:
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800721 * 0 on success, -errno on failure.
722 */
723static int __init_memblock memblock_isolate_range(struct memblock_type *type,
724 phys_addr_t base, phys_addr_t size,
725 int *start_rgn, int *end_rgn)
726{
Tejun Heoeb18f1b2011-12-08 10:22:07 -0800727 phys_addr_t end = base + memblock_cap_size(base, &size);
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800728 int idx;
729 struct memblock_region *rgn;
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800730
731 *start_rgn = *end_rgn = 0;
732
Tejun Heob3dc6272012-04-20 08:31:34 -0700733 if (!size)
734 return 0;
735
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800736 /* we'll create at most two more regions */
737 while (type->cnt + 2 > type->max)
Greg Pearson48c3b582012-06-20 12:53:05 -0700738 if (memblock_double_array(type, base, size) < 0)
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800739 return -ENOMEM;
740
Gioh Kim66e8b432017-11-15 17:33:42 -0800741 for_each_memblock_type(idx, type, rgn) {
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800742 phys_addr_t rbase = rgn->base;
743 phys_addr_t rend = rbase + rgn->size;
744
745 if (rbase >= end)
746 break;
747 if (rend <= base)
748 continue;
749
750 if (rbase < base) {
751 /*
752 * @rgn intersects from below. Split and continue
753 * to process the next region - the new top half.
754 */
755 rgn->base = base;
Tejun Heo1440c4e2011-12-08 10:22:08 -0800756 rgn->size -= base - rbase;
757 type->total_size -= base - rbase;
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800758 memblock_insert_region(type, idx, rbase, base - rbase,
Tang Chen66a20752014-01-21 15:49:20 -0800759 memblock_get_region_node(rgn),
760 rgn->flags);
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800761 } else if (rend > end) {
762 /*
763 * @rgn intersects from above. Split and redo the
764 * current region - the new bottom half.
765 */
766 rgn->base = end;
Tejun Heo1440c4e2011-12-08 10:22:08 -0800767 rgn->size -= end - rbase;
768 type->total_size -= end - rbase;
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800769 memblock_insert_region(type, idx--, rbase, end - rbase,
Tang Chen66a20752014-01-21 15:49:20 -0800770 memblock_get_region_node(rgn),
771 rgn->flags);
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800772 } else {
773 /* @rgn is fully contained, record it */
774 if (!*end_rgn)
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -0800775 *start_rgn = idx;
776 *end_rgn = idx + 1;
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800777 }
778 }
779
780 return 0;
781}
Tejun Heo6a9ceb32011-12-08 10:22:07 -0800782
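/*
 * Worked example (illustrative only): isolating [0x2000, 0x3000) from a
 * single region [0x1000, 0x4000) splits it into three regions:
 *
 *	[0x1000, 0x2000) [0x2000, 0x3000) [0x3000, 0x4000)
 *
 * and *start_rgn/*end_rgn are set so that the middle region alone is
 * covered by the [start_rgn, end_rgn) index range handed back to callers
 * such as memblock_remove_range() and memblock_setclr_flag().
 */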
Alexander Kuleshov35bd16a2015-11-05 18:47:00 -0800783static int __init_memblock memblock_remove_range(struct memblock_type *type,
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100784 phys_addr_t base, phys_addr_t size)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000785{
Tejun Heo71936182011-12-08 10:22:07 -0800786 int start_rgn, end_rgn;
787 int i, ret;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000788
Tejun Heo71936182011-12-08 10:22:07 -0800789 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
790 if (ret)
791 return ret;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000792
Tejun Heo71936182011-12-08 10:22:07 -0800793 for (i = end_rgn - 1; i >= start_rgn; i--)
794 memblock_remove_region(type, i);
Benjamin Herrenschmidt8f7a6602011-03-22 16:33:43 -0700795 return 0;
Yinghai Lu95f72d12010-07-12 14:36:09 +1000796}
797
Tejun Heo581adcb2011-12-08 10:22:06 -0800798int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000799{
Minchan Kim25cf23d2018-06-07 17:07:35 -0700800 phys_addr_t end = base + size - 1;
801
802 memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
803 &base, &end, (void *)_RET_IP_);
804
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100805 return memblock_remove_range(&memblock.memory, base, size);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000806}
807
Mike Rapoport4d728682018-12-28 00:35:29 -0800808/**
809 * memblock_free - free boot memory block
810 * @base: phys starting address of the boot memory block
811 * @size: size of the boot memory block in bytes
812 *
813 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 814 * The freed memory will not be released to the buddy allocator.
815 */
Tejun Heo581adcb2011-12-08 10:22:06 -0800816int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000817{
Miles Chen5d63f812017-02-22 15:46:42 -0800818 phys_addr_t end = base + size - 1;
819
Sakari Ailusd75f7732019-03-25 21:32:28 +0200820 memblock_dbg(" memblock_free: [%pa-%pa] %pS\n",
Miles Chen5d63f812017-02-22 15:46:42 -0800821 &base, &end, (void *)_RET_IP_);
Tejun Heo24aa0782011-07-12 11:16:06 +0200822
Catalin Marinas9099dae2016-10-11 13:55:11 -0700823 kmemleak_free_part_phys(base, size);
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100824 return memblock_remove_range(&memblock.reserved, base, size);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000825}
826
Alexander Kuleshovf705ac42016-05-20 16:57:35 -0700827int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
Yinghai Lu95f72d12010-07-12 14:36:09 +1000828{
Miles Chen5d63f812017-02-22 15:46:42 -0800829 phys_addr_t end = base + size - 1;
830
Sakari Ailusd75f7732019-03-25 21:32:28 +0200831 memblock_dbg("memblock_reserve: [%pa-%pa] %pS\n",
Miles Chen5d63f812017-02-22 15:46:42 -0800832 &base, &end, (void *)_RET_IP_);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000833
Alexander Kuleshovf705ac42016-05-20 16:57:35 -0700834 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
Yinghai Lu95f72d12010-07-12 14:36:09 +1000835}
836
Tejun Heo35fd0802011-07-12 11:15:59 +0200837/**
Mike Rapoport47cec442018-06-30 17:55:02 +0300838 * memblock_setclr_flag - set or clear flag for a memory region
839 * @base: base address of the region
840 * @size: size of the region
841 * @set: set or clear the flag
 842 * @flag: the flag to update
Tang Chen66b16ed2014-01-21 15:49:23 -0800843 *
Tony Luck4308ce12014-12-12 16:54:59 -0800844 * This function isolates the region [@base, @base + @size), and sets/clears the flag.
Tang Chen66b16ed2014-01-21 15:49:23 -0800845 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300846 * Return: 0 on success, -errno on failure.
Tang Chen66b16ed2014-01-21 15:49:23 -0800847 */
Tony Luck4308ce12014-12-12 16:54:59 -0800848static int __init_memblock memblock_setclr_flag(phys_addr_t base,
849 phys_addr_t size, int set, int flag)
Tang Chen66b16ed2014-01-21 15:49:23 -0800850{
851 struct memblock_type *type = &memblock.memory;
852 int i, ret, start_rgn, end_rgn;
853
854 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
855 if (ret)
856 return ret;
857
Mike Rapoportfe145122019-03-11 23:30:46 -0700858 for (i = start_rgn; i < end_rgn; i++) {
859 struct memblock_region *r = &type->regions[i];
860
Tony Luck4308ce12014-12-12 16:54:59 -0800861 if (set)
Mike Rapoportfe145122019-03-11 23:30:46 -0700862 r->flags |= flag;
Tony Luck4308ce12014-12-12 16:54:59 -0800863 else
Mike Rapoportfe145122019-03-11 23:30:46 -0700864 r->flags &= ~flag;
865 }
Tang Chen66b16ed2014-01-21 15:49:23 -0800866
867 memblock_merge_regions(type);
868 return 0;
869}
870
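/*
 * Illustrative sketch (not part of this file): the wrappers below are the
 * public way to use memblock_setclr_flag(). For example, platform code that
 * parses a hotpluggable range from firmware might do (addresses are made up):
 *
 *	memblock_mark_hotplug(0x100000000ULL, SZ_1G);
 *	...
 *	memblock_clear_hotplug(0x100000000ULL, SZ_1G);
 */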
871/**
Tony Luck4308ce12014-12-12 16:54:59 -0800872 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
873 * @base: the base phys addr of the region
874 * @size: the size of the region
875 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300876 * Return: 0 on success, -errno on failure.
Tony Luck4308ce12014-12-12 16:54:59 -0800877 */
878int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
879{
880 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
881}
882
883/**
Tang Chen66b16ed2014-01-21 15:49:23 -0800884 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
885 * @base: the base phys addr of the region
886 * @size: the size of the region
887 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300888 * Return: 0 on success, -errno on failure.
Tang Chen66b16ed2014-01-21 15:49:23 -0800889 */
890int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
891{
Tony Luck4308ce12014-12-12 16:54:59 -0800892 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
Tang Chen66b16ed2014-01-21 15:49:23 -0800893}
894
895/**
Tony Lucka3f5baf2015-06-24 16:58:12 -0700896 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
897 * @base: the base phys addr of the region
898 * @size: the size of the region
899 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300900 * Return: 0 on success, -errno on failure.
Tony Lucka3f5baf2015-06-24 16:58:12 -0700901 */
902int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
903{
904 system_has_some_mirror = true;
905
906 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
907}
908
Ard Biesheuvelbf3d3cc2015-11-30 13:28:15 +0100909/**
910 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
911 * @base: the base phys addr of the region
912 * @size: the size of the region
913 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300914 * Return: 0 on success, -errno on failure.
Ard Biesheuvelbf3d3cc2015-11-30 13:28:15 +0100915 */
916int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
917{
918 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
919}
Tony Lucka3f5baf2015-06-24 16:58:12 -0700920
921/**
AKASHI Takahiro4c546b82017-04-03 11:23:54 +0900922 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
923 * @base: the base phys addr of the region
924 * @size: the size of the region
925 *
Mike Rapoport47cec442018-06-30 17:55:02 +0300926 * Return: 0 on success, -errno on failure.
AKASHI Takahiro4c546b82017-04-03 11:23:54 +0900927 */
928int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
929{
930 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
931}
932
933/**
Robin Holt8e7a7f82015-06-30 14:56:41 -0700934 * __next_reserved_mem_region - next function for for_each_reserved_region()
935 * @idx: pointer to u64 loop variable
936 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
937 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
938 *
939 * Iterate over all reserved memory regions.
940 */
941void __init_memblock __next_reserved_mem_region(u64 *idx,
942 phys_addr_t *out_start,
943 phys_addr_t *out_end)
944{
Alexander Kuleshov567d1172015-09-08 15:03:33 -0700945 struct memblock_type *type = &memblock.reserved;
Robin Holt8e7a7f82015-06-30 14:56:41 -0700946
Richard Leitnercd33a762016-05-20 16:58:33 -0700947 if (*idx < type->cnt) {
Alexander Kuleshov567d1172015-09-08 15:03:33 -0700948 struct memblock_region *r = &type->regions[*idx];
Robin Holt8e7a7f82015-06-30 14:56:41 -0700949 phys_addr_t base = r->base;
950 phys_addr_t size = r->size;
951
952 if (out_start)
953 *out_start = base;
954 if (out_end)
955 *out_end = base + size - 1;
956
957 *idx += 1;
958 return;
959 }
960
961 /* signal end of iteration */
962 *idx = ULLONG_MAX;
963}
964
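/*
 * Illustrative sketch (not part of this file): walking all reserved ranges
 * through the iterator built on top of this helper (assumed to be the
 * for_each_reserved_mem_region() macro from memblock.h):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */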
Mike Rapoportc9a688a2019-03-11 23:30:50 -0700965static bool should_skip_region(struct memblock_region *m, int nid, int flags)
966{
967 int m_nid = memblock_get_region_node(m);
968
969 /* only memory regions are associated with nodes, check it */
970 if (nid != NUMA_NO_NODE && nid != m_nid)
971 return true;
972
973 /* skip hotpluggable memory regions if needed */
974 if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
975 return true;
976
977 /* if we want mirror memory skip non-mirror memory regions */
978 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
979 return true;
980
981 /* skip nomap memory unless we were asked for it explicitly */
982 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
983 return true;
984
985 return false;
986}
987
Robin Holt8e7a7f82015-06-30 14:56:41 -0700988/**
Mike Rapoporta2974132019-03-11 23:30:54 -0700989 * __next_mem_range - next function for for_each_free_mem_range() etc.
Tejun Heo35fd0802011-07-12 11:15:59 +0200990 * @idx: pointer to u64 loop variable
Grygorii Strashkob1154232014-01-21 15:50:16 -0800991 * @nid: node selector, %NUMA_NO_NODE for all nodes
Tony Luckfc6daaf2015-06-24 16:58:09 -0700992 * @flags: pick from blocks based on memory attributes
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100993 * @type_a: pointer to memblock_type from where the range is taken
994 * @type_b: pointer to memblock_type which excludes memory from being taken
Wanpeng Lidad75572012-06-20 12:53:01 -0700995 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
996 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
997 * @out_nid: ptr to int for nid of the range, can be %NULL
Tejun Heo35fd0802011-07-12 11:15:59 +0200998 *
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +0100999 * Find the first area from *@idx which matches @nid, fill the out
Tejun Heo35fd0802011-07-12 11:15:59 +02001000 * parameters, and update *@idx for the next iteration. The lower 32bit of
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001001 * *@idx contains index into type_a and the upper 32bit indexes the
1002 * areas before each region in type_b. For example, if type_b regions
Tejun Heo35fd0802011-07-12 11:15:59 +02001003 * look like the following,
1004 *
1005 * 0:[0-16), 1:[32-48), 2:[128-130)
1006 *
1007 * The upper 32bit indexes the following regions.
1008 *
1009 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
1010 *
1011 * As both region arrays are sorted, the function advances the two indices
1012 * in lockstep and returns each intersection.
1013 */
Mike Rapoporte1720fe2018-06-30 17:55:01 +03001014void __init_memblock __next_mem_range(u64 *idx, int nid,
1015 enum memblock_flags flags,
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001016 struct memblock_type *type_a,
1017 struct memblock_type *type_b,
1018 phys_addr_t *out_start,
1019 phys_addr_t *out_end, int *out_nid)
Tejun Heo35fd0802011-07-12 11:15:59 +02001020{
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001021 int idx_a = *idx & 0xffffffff;
1022 int idx_b = *idx >> 32;
Grygorii Strashkob1154232014-01-21 15:50:16 -08001023
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001024 if (WARN_ONCE(nid == MAX_NUMNODES,
1025 "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
Grygorii Strashko560dca272014-01-21 15:50:55 -08001026 nid = NUMA_NO_NODE;
Tejun Heo35fd0802011-07-12 11:15:59 +02001027
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001028 for (; idx_a < type_a->cnt; idx_a++) {
1029 struct memblock_region *m = &type_a->regions[idx_a];
1030
Tejun Heo35fd0802011-07-12 11:15:59 +02001031 phys_addr_t m_start = m->base;
1032 phys_addr_t m_end = m->base + m->size;
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001033 int m_nid = memblock_get_region_node(m);
Tejun Heo35fd0802011-07-12 11:15:59 +02001034
Mike Rapoportc9a688a2019-03-11 23:30:50 -07001035 if (should_skip_region(m, nid, flags))
Ard Biesheuvelbf3d3cc2015-11-30 13:28:15 +01001036 continue;
1037
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001038 if (!type_b) {
1039 if (out_start)
1040 *out_start = m_start;
1041 if (out_end)
1042 *out_end = m_end;
1043 if (out_nid)
1044 *out_nid = m_nid;
1045 idx_a++;
1046 *idx = (u32)idx_a | (u64)idx_b << 32;
1047 return;
1048 }
Tejun Heo35fd0802011-07-12 11:15:59 +02001049
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001050 /* scan areas before each reservation */
1051 for (; idx_b < type_b->cnt + 1; idx_b++) {
1052 struct memblock_region *r;
1053 phys_addr_t r_start;
1054 phys_addr_t r_end;
1055
1056 r = &type_b->regions[idx_b];
1057 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1058 r_end = idx_b < type_b->cnt ?
Stefan Agner1c4bc432018-06-07 17:06:15 -07001059 r->base : PHYS_ADDR_MAX;
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001060
1061 /*
1062 * if idx_b advanced past idx_a,
1063 * break out to advance idx_a
1064 */
Tejun Heo35fd0802011-07-12 11:15:59 +02001065 if (r_start >= m_end)
1066 break;
1067 /* if the two regions intersect, we're done */
1068 if (m_start < r_end) {
1069 if (out_start)
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001070 *out_start =
1071 max(m_start, r_start);
Tejun Heo35fd0802011-07-12 11:15:59 +02001072 if (out_end)
1073 *out_end = min(m_end, r_end);
1074 if (out_nid)
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001075 *out_nid = m_nid;
Tejun Heo35fd0802011-07-12 11:15:59 +02001076 /*
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001077 * The region which ends first is
1078 * advanced for the next iteration.
Tejun Heo35fd0802011-07-12 11:15:59 +02001079 */
1080 if (m_end <= r_end)
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001081 idx_a++;
Tejun Heo35fd0802011-07-12 11:15:59 +02001082 else
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001083 idx_b++;
1084 *idx = (u32)idx_a | (u64)idx_b << 32;
Tejun Heo35fd0802011-07-12 11:15:59 +02001085 return;
1086 }
1087 }
1088 }
1089
1090 /* signal end of iteration */
1091 *idx = ULLONG_MAX;
1092}
1093
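/*
 * Illustrative sketch (not part of this file): the usual way to consume this
 * helper is through for_each_free_mem_range(), exactly as the range finders
 * earlier in this file do:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */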
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001094/**
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001095 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
1096 *
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001097 * @idx: pointer to u64 loop variable
Alexander Kuleshovad5ea8c2015-09-08 15:04:22 -07001098 * @nid: node selector, %NUMA_NO_NODE for all nodes
Tony Luckfc6daaf2015-06-24 16:58:09 -07001099 * @flags: pick from blocks based on memory attributes
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001100 * @type_a: pointer to memblock_type from where the range is taken
1101 * @type_b: pointer to memblock_type which excludes memory from being taken
Wanpeng Lidad75572012-06-20 12:53:01 -07001102 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
1103 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
1104 * @out_nid: ptr to int for nid of the range, can be %NULL
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001105 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001106 * Finds the next range from type_a which is not marked as unsuitable
1107 * in type_b.
1108 *
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001109 * Reverse of __next_mem_range().
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001110 */
Mike Rapoporte1720fe2018-06-30 17:55:01 +03001111void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
1112 enum memblock_flags flags,
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001113 struct memblock_type *type_a,
1114 struct memblock_type *type_b,
1115 phys_addr_t *out_start,
1116 phys_addr_t *out_end, int *out_nid)
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001117{
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001118 int idx_a = *idx & 0xffffffff;
1119 int idx_b = *idx >> 32;
Grygorii Strashkob1154232014-01-21 15:50:16 -08001120
Grygorii Strashko560dca272014-01-21 15:50:55 -08001121 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1122 nid = NUMA_NO_NODE;
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001123
1124 if (*idx == (u64)ULLONG_MAX) {
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001125 idx_a = type_a->cnt - 1;
zijun_hue47608a2016-08-04 15:32:00 -07001126 if (type_b != NULL)
1127 idx_b = type_b->cnt;
1128 else
1129 idx_b = 0;
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001130 }
1131
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001132 for (; idx_a >= 0; idx_a--) {
1133 struct memblock_region *m = &type_a->regions[idx_a];
1134
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001135 phys_addr_t m_start = m->base;
1136 phys_addr_t m_end = m->base + m->size;
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001137 int m_nid = memblock_get_region_node(m);
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001138
Mike Rapoportc9a688a2019-03-11 23:30:50 -07001139 if (should_skip_region(m, nid, flags))
Ard Biesheuvelbf3d3cc2015-11-30 13:28:15 +01001140 continue;
1141
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001142 if (!type_b) {
1143 if (out_start)
1144 *out_start = m_start;
1145 if (out_end)
1146 *out_end = m_end;
1147 if (out_nid)
1148 *out_nid = m_nid;
zijun_hufb399b42016-07-28 15:48:56 -07001149 idx_a--;
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001150 *idx = (u32)idx_a | (u64)idx_b << 32;
1151 return;
1152 }
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001153
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001154 /* scan areas before each reservation */
1155 for (; idx_b >= 0; idx_b--) {
1156 struct memblock_region *r;
1157 phys_addr_t r_start;
1158 phys_addr_t r_end;
1159
1160 r = &type_b->regions[idx_b];
1161 r_start = idx_b ? r[-1].base + r[-1].size : 0;
1162 r_end = idx_b < type_b->cnt ?
Stefan Agner1c4bc432018-06-07 17:06:15 -07001163 r->base : PHYS_ADDR_MAX;
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001164 /*
1165 * if idx_b advanced past idx_a,
1166 * break out to advance idx_a
1167 */
1168
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001169 if (r_end <= m_start)
1170 break;
1171 /* if the two regions intersect, we're done */
1172 if (m_end > r_start) {
1173 if (out_start)
1174 *out_start = max(m_start, r_start);
1175 if (out_end)
1176 *out_end = min(m_end, r_end);
1177 if (out_nid)
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001178 *out_nid = m_nid;
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001179 if (m_start >= r_start)
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001180 idx_a--;
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001181 else
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001182 idx_b--;
1183 *idx = (u32)idx_a | (u64)idx_b << 32;
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001184 return;
1185 }
1186 }
1187 }
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001188 /* signal end of iteration */
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001189 *idx = ULLONG_MAX;
1190}
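/*
 * Illustrative sketch (not part of the original file): how a caller would
 * typically drive this reverse iterator. The for_each_free_mem_range_reverse()
 * wrapper from <linux/memblock.h> is assumed to expand to __next_mem_range_rev()
 * over memblock.memory and memblock.reserved; the loop body is only an example.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *					&start, &end, NULL) {
 *		// [start, end) is a free (memory minus reserved) range,
 *		// visited from the highest addresses down
 *	}
 */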
1191
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001192#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1193/*
Chen Chang45e79812018-11-16 15:08:57 -08001194 * Common iterator interface used to define for_each_mem_pfn_range().
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001195 */
1196void __init_memblock __next_mem_pfn_range(int *idx, int nid,
1197 unsigned long *out_start_pfn,
1198 unsigned long *out_end_pfn, int *out_nid)
1199{
1200 struct memblock_type *type = &memblock.memory;
1201 struct memblock_region *r;
1202
1203 while (++*idx < type->cnt) {
1204 r = &type->regions[*idx];
1205
1206 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
1207 continue;
1208 if (nid == MAX_NUMNODES || nid == r->nid)
1209 break;
1210 }
1211 if (*idx >= type->cnt) {
1212 *idx = -1;
1213 return;
1214 }
1215
1216 if (out_start_pfn)
1217 *out_start_pfn = PFN_UP(r->base);
1218 if (out_end_pfn)
1219 *out_end_pfn = PFN_DOWN(r->base + r->size);
1220 if (out_nid)
1221 *out_nid = r->nid;
1222}
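/*
 * Illustrative sketch (not part of the original file): this iterator is
 * normally consumed through the for_each_mem_pfn_range() wrapper from
 * <linux/memblock.h>, which is assumed here; the loop body is only an example.
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("pfns [%lx, %lx) are on node %d\n",
 *			start_pfn, end_pfn, nid);
 */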
1223
1224/**
1225 * memblock_set_node - set node ID on memblock regions
1226 * @base: base of area to set node ID for
1227 * @size: size of area to set node ID for
Tang Chene7e8de52014-01-21 15:49:26 -08001228 * @type: memblock type to set node ID for
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001229 * @nid: node ID to set
1230 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001231 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001232 * Regions which cross the area boundaries are split as necessary.
1233 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001234 * Return:
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001235 * 0 on success, -errno on failure.
1236 */
1237int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
Tang Chene7e8de52014-01-21 15:49:26 -08001238 struct memblock_type *type, int nid)
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001239{
Tejun Heo6a9ceb32011-12-08 10:22:07 -08001240 int start_rgn, end_rgn;
1241 int i, ret;
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001242
Tejun Heo6a9ceb32011-12-08 10:22:07 -08001243 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
1244 if (ret)
1245 return ret;
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001246
Tejun Heo6a9ceb32011-12-08 10:22:07 -08001247 for (i = start_rgn; i < end_rgn; i++)
Wanpeng Lie9d24ad2012-10-08 16:32:21 -07001248 memblock_set_region_node(&type->regions[i], nid);
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001249
1250 memblock_merge_regions(type);
1251 return 0;
1252}
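/*
 * Illustrative sketch (not part of the original file): early NUMA setup code
 * typically calls memblock_set_node() once the node extents are known; start,
 * end and nid below are hypothetical values discovered by the architecture.
 *
 *	memblock_set_node(start, end - start, &memblock.memory, nid);
 */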
1253#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
Alexander Duyck837566e2019-05-13 17:21:17 -07001254#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1255/**
1256 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
1257 *
1258 * @idx: pointer to u64 loop variable
1259 * @zone: zone in which all of the memory blocks reside
1260 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
1261 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
1262 *
1263 * This function is meant to be a zone/pfn specific wrapper for the
 1264 * for_each_mem_range type iterators. It is used by the deferred
 1265 * memory init routines, which previously duplicated much of this
 1266 * logic throughout the code, so the logic is now centralized in
 1267 * this single iterator that provides everything those routines
 1268 * need.
1269 */
1270void __init_memblock
1271__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
1272 unsigned long *out_spfn, unsigned long *out_epfn)
1273{
1274 int zone_nid = zone_to_nid(zone);
1275 phys_addr_t spa, epa;
1276 int nid;
1277
1278 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1279 &memblock.memory, &memblock.reserved,
1280 &spa, &epa, &nid);
1281
1282 while (*idx != U64_MAX) {
1283 unsigned long epfn = PFN_DOWN(epa);
1284 unsigned long spfn = PFN_UP(spa);
1285
1286 /*
1287 * Verify the end is at least past the start of the zone and
1288 * that we have at least one PFN to initialize.
1289 */
1290 if (zone->zone_start_pfn < epfn && spfn < epfn) {
1291 /* if we went too far just stop searching */
1292 if (zone_end_pfn(zone) <= spfn) {
1293 *idx = U64_MAX;
1294 break;
1295 }
1296
1297 if (out_spfn)
1298 *out_spfn = max(zone->zone_start_pfn, spfn);
1299 if (out_epfn)
1300 *out_epfn = min(zone_end_pfn(zone), epfn);
1301
1302 return;
1303 }
1304
1305 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
1306 &memblock.memory, &memblock.reserved,
1307 &spa, &epa, &nid);
1308 }
1309
1310 /* signal end of iteration */
1311 if (out_spfn)
1312 *out_spfn = ULONG_MAX;
1313 if (out_epfn)
1314 *out_epfn = 0;
1315}
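/*
 * Illustrative sketch (not part of the original file): the deferred struct
 * page initialization code is assumed to consume this through the
 * for_each_free_mem_pfn_range_in_zone() wrapper from <linux/memblock.h>;
 * the loop body is only an example.
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
 *		// initialize struct pages for pfns in [spfn, epfn)
 *	}
 */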
1316
1317#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001318
Mike Rapoport92d12f92019-03-11 23:29:41 -07001319/**
1320 * memblock_alloc_range_nid - allocate boot memory block
1321 * @size: size of memory block to be allocated in bytes
1322 * @align: alignment of the region and block's size
1323 * @start: the lower bound of the memory region to allocate (phys address)
1324 * @end: the upper bound of the memory region to allocate (phys address)
1325 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1326 *
1327 * The allocation is performed from memory region limited by
 1328 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
1329 *
 1330 * If the specified node cannot hold the requested memory, the
 1331 * allocation falls back to any node in the system.
1332 *
1333 * For systems with memory mirroring, the allocation is attempted first
1334 * from the regions with mirroring enabled and then retried from any
1335 * memory region.
1336 *
1337 * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for
1338 * allocated boot memory block, so that it is never reported as leaks.
1339 *
1340 * Return:
1341 * Physical address of allocated memory block on success, %0 on failure.
1342 */
Akinobu Mita2bfc2862014-06-04 16:06:53 -07001343static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
1344 phys_addr_t align, phys_addr_t start,
Mike Rapoport92d12f92019-03-11 23:29:41 -07001345 phys_addr_t end, int nid)
Yinghai Lu95f72d12010-07-12 14:36:09 +10001346{
Mike Rapoport92d12f92019-03-11 23:29:41 -07001347 enum memblock_flags flags = choose_memblock_flags();
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001348 phys_addr_t found;
Yinghai Lu95f72d12010-07-12 14:36:09 +10001349
Mike Rapoport92d12f92019-03-11 23:29:41 -07001350 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
1351 nid = NUMA_NO_NODE;
1352
Mike Rapoport2f770802018-10-30 15:10:01 -07001353 if (!align) {
1354 /* Can't use WARNs this early in boot on powerpc */
1355 dump_stack();
1356 align = SMP_CACHE_BYTES;
1357 }
1358
Mike Rapoport92d12f92019-03-11 23:29:41 -07001359again:
Tony Luckfc6daaf2015-06-24 16:58:09 -07001360 found = memblock_find_in_range_node(size, align, start, end, nid,
1361 flags);
Mike Rapoport92d12f92019-03-11 23:29:41 -07001362 if (found && !memblock_reserve(found, size))
1363 goto done;
1364
1365 if (nid != NUMA_NO_NODE) {
1366 found = memblock_find_in_range_node(size, align, start,
1367 end, NUMA_NO_NODE,
1368 flags);
1369 if (found && !memblock_reserve(found, size))
1370 goto done;
1371 }
1372
1373 if (flags & MEMBLOCK_MIRROR) {
1374 flags &= ~MEMBLOCK_MIRROR;
1375 pr_warn("Could not allocate %pap bytes of mirrored memory\n",
1376 &size);
1377 goto again;
1378 }
1379
1380 return 0;
1381
1382done:
1383 /* Skip kmemleak for kasan_init() due to high volume. */
1384 if (end != MEMBLOCK_ALLOC_KASAN)
Catalin Marinasaedf95e2014-06-06 14:38:20 -07001385 /*
Mike Rapoport92d12f92019-03-11 23:29:41 -07001386 * The min_count is set to 0 so that memblock allocated
1387 * blocks are never reported as leaks. This is because many
1388 * of these blocks are only referred via the physical
1389 * address which is not looked up by kmemleak.
Catalin Marinasaedf95e2014-06-06 14:38:20 -07001390 */
Catalin Marinas9099dae2016-10-11 13:55:11 -07001391 kmemleak_alloc_phys(found, size, 0, 0);
Mike Rapoport92d12f92019-03-11 23:29:41 -07001392
1393 return found;
Yinghai Lu95f72d12010-07-12 14:36:09 +10001394}
1395
Mike Rapoporta2974132019-03-11 23:30:54 -07001396/**
1397 * memblock_phys_alloc_range - allocate a memory block inside specified range
1398 * @size: size of memory block to be allocated in bytes
1399 * @align: alignment of the region and block's size
1400 * @start: the lower bound of the memory region to allocate (physical address)
1401 * @end: the upper bound of the memory region to allocate (physical address)
1402 *
 1403 * Allocate @size bytes in the range between @start and @end.
1404 *
1405 * Return: physical address of the allocated memory block on success,
1406 * %0 on failure.
1407 */
Mike Rapoport8a770c22019-03-11 23:29:16 -07001408phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1409 phys_addr_t align,
1410 phys_addr_t start,
1411 phys_addr_t end)
Akinobu Mita2bfc2862014-06-04 16:06:53 -07001412{
Mike Rapoport92d12f92019-03-11 23:29:41 -07001413 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
Tejun Heo7bd0b0f2011-12-08 10:22:09 -08001414}
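/*
 * Illustrative sketch (not part of the original file): allocating a page-sized
 * block that must live below 4 GiB, e.g. for a device that can only address
 * 32 bits; the constraint and SZ_4G from <linux/sizes.h> are just examples.
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, SZ_4G);
 *	if (!pa)
 *		pr_warn("no memory available below 4G\n");
 */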
1415
Mike Rapoporta2974132019-03-11 23:30:54 -07001416/**
1417 * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
1418 * @size: size of memory block to be allocated in bytes
1419 * @align: alignment of the region and block's size
1420 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1421 *
1422 * Allocates memory block from the specified NUMA node. If the node
 1423 * has no available memory, the allocation falls back to any node in the
1424 * system.
1425 *
1426 * Return: physical address of the allocated memory block on success,
1427 * %0 on failure.
1428 */
Mike Rapoport9a8dd702018-10-30 15:07:59 -07001429phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
Benjamin Herrenschmidt9d1e2492010-07-06 15:39:17 -07001430{
Mike Rapoport33755572019-03-11 23:29:21 -07001431 return memblock_alloc_range_nid(size, align, 0,
Mike Rapoport92d12f92019-03-11 23:29:41 -07001432 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
Yinghai Lu95f72d12010-07-12 14:36:09 +10001433}
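/*
 * Illustrative sketch (not part of the original file): a node-local early
 * allocation, e.g. for a per-node scratch buffer; size and nid are
 * hypothetical.
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_try_nid(size, SMP_CACHE_BYTES, nid);
 *	// pa may come from another node if @nid has no free memory
 */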
1434
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001435/**
Mike Rapoporteb31d552018-10-30 15:08:04 -07001436 * memblock_alloc_internal - allocate boot memory block
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001437 * @size: size of memory block to be allocated in bytes
1438 * @align: alignment of the region and block's size
1439 * @min_addr: the lower bound of the memory region to allocate (phys address)
1440 * @max_addr: the upper bound of the memory region to allocate (phys address)
1441 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1442 *
Mike Rapoport92d12f92019-03-11 23:29:41 -07001443 * Allocates memory block using memblock_alloc_range_nid() and
1444 * converts the returned physical address to virtual.
1445 *
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001446 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
Mike Rapoport92d12f92019-03-11 23:29:41 -07001447 * will fall back to memory below @min_addr. Other constraints, such
 1448 * as node and mirrored memory, will be handled again in
1449 * memblock_alloc_range_nid().
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001450 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001451 * Return:
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001452 * Virtual address of allocated memory block on success, NULL on failure.
1453 */
Mike Rapoporteb31d552018-10-30 15:08:04 -07001454static void * __init memblock_alloc_internal(
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001455 phys_addr_t size, phys_addr_t align,
1456 phys_addr_t min_addr, phys_addr_t max_addr,
1457 int nid)
1458{
1459 phys_addr_t alloc;
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001460
1461 /*
1462 * Detect any accidental use of these APIs after slab is ready, as at
1463 * this moment memblock may be deinitialized already and its
Mike Rapoportc6ffc5c2018-10-30 15:09:30 -07001464 * internal data may be destroyed (after execution of memblock_free_all)
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001465 */
1466 if (WARN_ON_ONCE(slab_is_available()))
1467 return kzalloc_node(size, GFP_NOWAIT, nid);
1468
Mike Rapoportf3057ad2019-10-18 20:20:01 -07001469 if (max_addr > memblock.current_limit)
1470 max_addr = memblock.current_limit;
1471
Mike Rapoport92d12f92019-03-11 23:29:41 -07001472 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
Mike Rapoport2f770802018-10-30 15:10:01 -07001473
Mike Rapoport92d12f92019-03-11 23:29:41 -07001474 /* retry allocation without lower limit */
1475 if (!alloc && min_addr)
1476 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001477
Mike Rapoport92d12f92019-03-11 23:29:41 -07001478 if (!alloc)
1479 return NULL;
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001480
Mike Rapoport92d12f92019-03-11 23:29:41 -07001481 return phys_to_virt(alloc);
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001482}
1483
1484/**
Mike Rapoporteb31d552018-10-30 15:08:04 -07001485 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001486 * memory and without panicking
1487 * @size: size of memory block to be allocated in bytes
1488 * @align: alignment of the region and block's size
1489 * @min_addr: the lower bound of the memory region from where the allocation
1490 * is preferred (phys address)
1491 * @max_addr: the upper bound of the memory region from where the allocation
Mike Rapoport97ad1082018-10-30 15:09:44 -07001492 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001493 * allocate only from memory limited by memblock.current_limit value
1494 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1495 *
1496 * Public function, provides additional debug information (including caller
1497 * info), if enabled. Does not zero allocated memory, does not panic if request
1498 * cannot be satisfied.
1499 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001500 * Return:
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001501 * Virtual address of allocated memory block on success, NULL on failure.
1502 */
Mike Rapoporteb31d552018-10-30 15:08:04 -07001503void * __init memblock_alloc_try_nid_raw(
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001504 phys_addr_t size, phys_addr_t align,
1505 phys_addr_t min_addr, phys_addr_t max_addr,
1506 int nid)
1507{
1508 void *ptr;
1509
Sakari Ailusd75f7732019-03-25 21:32:28 +02001510 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
Mike Rapoporta36aab82018-08-17 15:47:17 -07001511 __func__, (u64)size, (u64)align, nid, &min_addr,
1512 &max_addr, (void *)_RET_IP_);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001513
Mike Rapoporteb31d552018-10-30 15:08:04 -07001514 ptr = memblock_alloc_internal(size, align,
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001515 min_addr, max_addr, nid);
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001516 if (ptr && size > 0)
Alexander Duyckf682a972018-10-26 15:07:45 -07001517 page_init_poison(ptr, size);
1518
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001519 return ptr;
1520}
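/*
 * Illustrative sketch (not part of the original file): because this variant
 * does not zero the allocation, the caller owns initialization; the memset()
 * below only stands in for whatever initialization the caller really performs
 * (the point of the _raw variant is to skip the blanket zeroing when the
 * caller will overwrite the memory anyway). size and nid are hypothetical.
 *
 *	void *buf;
 *
 *	buf = memblock_alloc_try_nid_raw(size, SMP_CACHE_BYTES, 0,
 *					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (buf)
 *		memset(buf, 0, size);
 */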
1521
1522/**
Mike Rapoportc0dbe822019-03-11 23:30:37 -07001523 * memblock_alloc_try_nid - allocate boot memory block
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001524 * @size: size of memory block to be allocated in bytes
1525 * @align: alignment of the region and block's size
1526 * @min_addr: the lower bound of the memory region from where the allocation
1527 * is preferred (phys address)
1528 * @max_addr: the upper bound of the memory region from where the allocation
Mike Rapoport97ad1082018-10-30 15:09:44 -07001529 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001530 * allocate only from memory limited by memblock.current_limit value
1531 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1532 *
Mike Rapoportc0dbe822019-03-11 23:30:37 -07001533 * Public function, provides additional debug information (including caller
1534 * info), if enabled. This function zeroes the allocated memory.
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001535 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001536 * Return:
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001537 * Virtual address of allocated memory block on success, NULL on failure.
1538 */
Mike Rapoporteb31d552018-10-30 15:08:04 -07001539void * __init memblock_alloc_try_nid(
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001540 phys_addr_t size, phys_addr_t align,
1541 phys_addr_t min_addr, phys_addr_t max_addr,
1542 int nid)
1543{
1544 void *ptr;
1545
Sakari Ailusd75f7732019-03-25 21:32:28 +02001546 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
Mike Rapoporta36aab82018-08-17 15:47:17 -07001547 __func__, (u64)size, (u64)align, nid, &min_addr,
1548 &max_addr, (void *)_RET_IP_);
Mike Rapoporteb31d552018-10-30 15:08:04 -07001549 ptr = memblock_alloc_internal(size, align,
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001550 min_addr, max_addr, nid);
Mike Rapoportc0dbe822019-03-11 23:30:37 -07001551 if (ptr)
Pavel Tatashinea1f5f32017-11-15 17:36:27 -08001552 memset(ptr, 0, size);
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001553
Mike Rapoportc0dbe822019-03-11 23:30:37 -07001554 return ptr;
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001555}
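/*
 * Illustrative sketch (not part of the original file): allocating a small,
 * zeroed boot-time table from any node; "struct foo" and the panic message
 * are hypothetical.
 *
 *	struct foo *table;
 *
 *	table = memblock_alloc_try_nid(sizeof(*table), SMP_CACHE_BYTES, 0,
 *				       MEMBLOCK_ALLOC_ACCESSIBLE,
 *				       NUMA_NO_NODE);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */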
1556
1557/**
Mike Rapoporta2974132019-03-11 23:30:54 -07001558 * __memblock_free_late - free pages directly to buddy allocator
Mike Rapoport48a833c2018-06-30 17:55:03 +03001559 * @base: phys starting address of the boot memory block
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001560 * @size: size of the boot memory block in bytes
1561 *
Mike Rapoporta2974132019-03-11 23:30:54 -07001562 * This is only useful when the memblock allocator has already been torn
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001563 * down, but we are still initializing the system. Pages are released directly
Mike Rapoporta2974132019-03-11 23:30:54 -07001564 * to the buddy allocator.
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001565 */
1566void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1567{
Mike Rapoporta36aab82018-08-17 15:47:17 -07001568 phys_addr_t cursor, end;
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001569
Mike Rapoporta36aab82018-08-17 15:47:17 -07001570 end = base + size - 1;
Sakari Ailusd75f7732019-03-25 21:32:28 +02001571 memblock_dbg("%s: [%pa-%pa] %pS\n",
Mike Rapoporta36aab82018-08-17 15:47:17 -07001572 __func__, &base, &end, (void *)_RET_IP_);
Catalin Marinas9099dae2016-10-11 13:55:11 -07001573 kmemleak_free_part_phys(base, size);
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001574 cursor = PFN_UP(base);
1575 end = PFN_DOWN(base + size);
1576
1577 for (; cursor < end; cursor++) {
Mike Rapoport7c2ee342018-10-30 15:09:36 -07001578 memblock_free_pages(pfn_to_page(cursor), cursor, 0);
Arun KSca79b0c2018-12-28 00:34:29 -08001579 totalram_pages_inc();
Santosh Shilimkar26f09e92014-01-21 15:50:19 -08001580 }
1581}
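/*
 * Illustrative sketch (not part of the original file): a region reserved
 * during early boot can be handed back to the buddy allocator once it is no
 * longer needed, assuming the memblock_free_late() wrapper from
 * <linux/memblock.h>; base and size are hypothetical.
 *
 *	memblock_free_late(base, size);
 */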
Benjamin Herrenschmidt9d1e2492010-07-06 15:39:17 -07001582
1583/*
1584 * Remaining API functions
1585 */
1586
David Gibson1f1ffb8a2016-02-05 15:36:19 -08001587phys_addr_t __init_memblock memblock_phys_mem_size(void)
Yinghai Lu95f72d12010-07-12 14:36:09 +10001588{
Tejun Heo1440c4e2011-12-08 10:22:08 -08001589 return memblock.memory.total_size;
Yinghai Lu95f72d12010-07-12 14:36:09 +10001590}
1591
Srikar Dronamraju8907de52016-10-07 16:59:18 -07001592phys_addr_t __init_memblock memblock_reserved_size(void)
1593{
1594 return memblock.reserved.total_size;
1595}
1596
Yinghai Lu595ad9a2013-01-24 12:20:09 -08001597phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
1598{
1599 unsigned long pages = 0;
1600 struct memblock_region *r;
1601 unsigned long start_pfn, end_pfn;
1602
1603 for_each_memblock(memory, r) {
1604 start_pfn = memblock_region_memory_base_pfn(r);
1605 end_pfn = memblock_region_memory_end_pfn(r);
1606 start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
1607 end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
1608 pages += end_pfn - start_pfn;
1609 }
1610
Fabian Frederick16763232014-04-07 15:37:53 -07001611 return PFN_PHYS(pages);
Yinghai Lu595ad9a2013-01-24 12:20:09 -08001612}
1613
Sam Ravnborg0a93ebe2011-10-31 17:08:16 -07001614/* lowest address */
1615phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1616{
1617 return memblock.memory.regions[0].base;
1618}
1619
Yinghai Lu10d06432010-07-28 15:43:02 +10001620phys_addr_t __init_memblock memblock_end_of_DRAM(void)
Yinghai Lu95f72d12010-07-12 14:36:09 +10001621{
1622 int idx = memblock.memory.cnt - 1;
1623
Benjamin Herrenschmidte3239ff2010-08-04 14:06:41 +10001624 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
Yinghai Lu95f72d12010-07-12 14:36:09 +10001625}
1626
Dennis Chena571d4e2016-07-28 15:48:26 -07001627static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
Yinghai Lu95f72d12010-07-12 14:36:09 +10001628{
Stefan Agner1c4bc432018-06-07 17:06:15 -07001629 phys_addr_t max_addr = PHYS_ADDR_MAX;
Emil Medve136199f2014-04-07 15:37:52 -07001630 struct memblock_region *r;
Yinghai Lu95f72d12010-07-12 14:36:09 +10001631
Dennis Chena571d4e2016-07-28 15:48:26 -07001632 /*
1633 * translate the memory @limit size into the max address within one of
 1634 * the memory memblock regions. If @limit exceeds the total size
Stefan Agner1c4bc432018-06-07 17:06:15 -07001635 * of those regions, max_addr will keep its original value, PHYS_ADDR_MAX
Dennis Chena571d4e2016-07-28 15:48:26 -07001636 */
Emil Medve136199f2014-04-07 15:37:52 -07001637 for_each_memblock(memory, r) {
Tejun Heoc0ce8fe2011-12-08 10:22:07 -08001638 if (limit <= r->size) {
1639 max_addr = r->base + limit;
1640 break;
1641 }
1642 limit -= r->size;
Yinghai Lu95f72d12010-07-12 14:36:09 +10001643 }
1644
Dennis Chena571d4e2016-07-28 15:48:26 -07001645 return max_addr;
1646}
1647
1648void __init memblock_enforce_memory_limit(phys_addr_t limit)
1649{
Stefan Agner1c4bc432018-06-07 17:06:15 -07001650 phys_addr_t max_addr = PHYS_ADDR_MAX;
Dennis Chena571d4e2016-07-28 15:48:26 -07001651
1652 if (!limit)
1653 return;
1654
1655 max_addr = __find_max_addr(limit);
1656
1657 /* @limit exceeds the total size of the memory, do nothing */
Stefan Agner1c4bc432018-06-07 17:06:15 -07001658 if (max_addr == PHYS_ADDR_MAX)
Dennis Chena571d4e2016-07-28 15:48:26 -07001659 return;
1660
Tejun Heoc0ce8fe2011-12-08 10:22:07 -08001661 /* truncate both memory and reserved regions */
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001662 memblock_remove_range(&memblock.memory, max_addr,
Stefan Agner1c4bc432018-06-07 17:06:15 -07001663 PHYS_ADDR_MAX);
Philipp Hachtmannf1af9d32014-01-29 18:16:01 +01001664 memblock_remove_range(&memblock.reserved, max_addr,
Stefan Agner1c4bc432018-06-07 17:06:15 -07001665 PHYS_ADDR_MAX);
Yinghai Lu95f72d12010-07-12 14:36:09 +10001666}
1667
AKASHI Takahiroc9ca9b42017-04-03 11:23:55 +09001668void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1669{
1670 int start_rgn, end_rgn;
1671 int i, ret;
1672
1673 if (!size)
1674 return;
1675
1676 ret = memblock_isolate_range(&memblock.memory, base, size,
1677 &start_rgn, &end_rgn);
1678 if (ret)
1679 return;
1680
1681 /* remove all the MAP regions */
1682 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1683 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1684 memblock_remove_region(&memblock.memory, i);
1685
1686 for (i = start_rgn - 1; i >= 0; i--)
1687 if (!memblock_is_nomap(&memblock.memory.regions[i]))
1688 memblock_remove_region(&memblock.memory, i);
1689
1690 /* truncate the reserved regions */
1691 memblock_remove_range(&memblock.reserved, 0, base);
1692 memblock_remove_range(&memblock.reserved,
Stefan Agner1c4bc432018-06-07 17:06:15 -07001693 base + size, PHYS_ADDR_MAX);
AKASHI Takahiroc9ca9b42017-04-03 11:23:55 +09001694}
1695
Dennis Chena571d4e2016-07-28 15:48:26 -07001696void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1697{
Dennis Chena571d4e2016-07-28 15:48:26 -07001698 phys_addr_t max_addr;
Dennis Chena571d4e2016-07-28 15:48:26 -07001699
1700 if (!limit)
1701 return;
1702
1703 max_addr = __find_max_addr(limit);
1704
1705 /* @limit exceeds the total size of the memory, do nothing */
Stefan Agner1c4bc432018-06-07 17:06:15 -07001706 if (max_addr == PHYS_ADDR_MAX)
Dennis Chena571d4e2016-07-28 15:48:26 -07001707 return;
1708
AKASHI Takahiroc9ca9b42017-04-03 11:23:55 +09001709 memblock_cap_memory_range(0, max_addr);
Dennis Chena571d4e2016-07-28 15:48:26 -07001710}
1711
Yinghai Lucd794812010-10-11 12:34:09 -07001712static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
Benjamin Herrenschmidt72d4b0b2010-08-04 14:38:47 +10001713{
1714 unsigned int left = 0, right = type->cnt;
1715
1716 do {
1717 unsigned int mid = (right + left) / 2;
1718
1719 if (addr < type->regions[mid].base)
1720 right = mid;
1721 else if (addr >= (type->regions[mid].base +
1722 type->regions[mid].size))
1723 left = mid + 1;
1724 else
1725 return mid;
1726 } while (left < right);
1727 return -1;
1728}
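/*
 * Worked example of the binary search above (values are hypothetical): with
 * two regions [0x1000, 0x2000) and [0x8000, 0x9000), searching for 0x8800
 * starts with left = 0, right = 2, mid = 1; 0x8800 falls inside regions[1],
 * so 1 is returned. Searching for 0x3000 first narrows to right = 1, then
 * left = 1 after checking regions[0], and -1 is returned because the loop
 * exits with left == right.
 */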
1729
Yueyi Lif5a222d2018-12-14 14:17:06 -08001730bool __init_memblock memblock_is_reserved(phys_addr_t addr)
Yinghai Lu95f72d12010-07-12 14:36:09 +10001731{
Benjamin Herrenschmidt72d4b0b2010-08-04 14:38:47 +10001732 return memblock_search(&memblock.reserved, addr) != -1;
1733}
Yinghai Lu95f72d12010-07-12 14:36:09 +10001734
Yaowei Baib4ad0c72016-01-14 15:18:54 -08001735bool __init_memblock memblock_is_memory(phys_addr_t addr)
Benjamin Herrenschmidt72d4b0b2010-08-04 14:38:47 +10001736{
1737 return memblock_search(&memblock.memory, addr) != -1;
1738}
1739
Yaowei Bai937f0c22018-02-06 15:41:18 -08001740bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
Ard Biesheuvelbf3d3cc2015-11-30 13:28:15 +01001741{
1742 int i = memblock_search(&memblock.memory, addr);
1743
1744 if (i == -1)
1745 return false;
1746 return !memblock_is_nomap(&memblock.memory.regions[i]);
1747}
1748
Yinghai Lue76b63f2013-09-11 14:22:17 -07001749#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1750int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1751 unsigned long *start_pfn, unsigned long *end_pfn)
1752{
1753 struct memblock_type *type = &memblock.memory;
Fabian Frederick16763232014-04-07 15:37:53 -07001754 int mid = memblock_search(type, PFN_PHYS(pfn));
Yinghai Lue76b63f2013-09-11 14:22:17 -07001755
1756 if (mid == -1)
1757 return -1;
1758
Fabian Frederickf7e2f7e2014-06-04 16:07:51 -07001759 *start_pfn = PFN_DOWN(type->regions[mid].base);
1760 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
Yinghai Lue76b63f2013-09-11 14:22:17 -07001761
1762 return type->regions[mid].nid;
1763}
1764#endif
1765
Stephen Boydeab30942012-05-24 00:45:21 -07001766/**
1767 * memblock_is_region_memory - check if a region is a subset of memory
1768 * @base: base of region to check
1769 * @size: size of region to check
1770 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001771 * Check if the region [@base, @base + @size) is a subset of a memory block.
Stephen Boydeab30942012-05-24 00:45:21 -07001772 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001773 * Return:
Stephen Boydeab30942012-05-24 00:45:21 -07001774 * true if the region is a subset of a memory block, false otherwise.
1775 */
Yaowei Bai937f0c22018-02-06 15:41:18 -08001776bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
Benjamin Herrenschmidt72d4b0b2010-08-04 14:38:47 +10001777{
Tomi Valkeinenabb65272011-01-20 14:44:20 -08001778 int idx = memblock_search(&memblock.memory, base);
Tejun Heoeb18f1b2011-12-08 10:22:07 -08001779 phys_addr_t end = base + memblock_cap_size(base, &size);
Benjamin Herrenschmidt72d4b0b2010-08-04 14:38:47 +10001780
1781 if (idx == -1)
Yaowei Bai937f0c22018-02-06 15:41:18 -08001782 return false;
Wei Yangef415ef2017-02-22 15:45:04 -08001783 return (memblock.memory.regions[idx].base +
Tejun Heoeb18f1b2011-12-08 10:22:07 -08001784 memblock.memory.regions[idx].size) >= end;
Yinghai Lu95f72d12010-07-12 14:36:09 +10001785}
1786
Stephen Boydeab30942012-05-24 00:45:21 -07001787/**
1788 * memblock_is_region_reserved - check if a region intersects reserved memory
1789 * @base: base of region to check
1790 * @size: size of region to check
1791 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001792 * Check if the region [@base, @base + @size) intersects a reserved
1793 * memory block.
Stephen Boydeab30942012-05-24 00:45:21 -07001794 *
Mike Rapoport47cec442018-06-30 17:55:02 +03001795 * Return:
Tang Chenc5c5c9d2015-09-08 15:02:00 -07001796 * True if they intersect, false if not.
Stephen Boydeab30942012-05-24 00:45:21 -07001797 */
Tang Chenc5c5c9d2015-09-08 15:02:00 -07001798bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
Yinghai Lu95f72d12010-07-12 14:36:09 +10001799{
Tejun Heoeb18f1b2011-12-08 10:22:07 -08001800 memblock_cap_size(base, &size);
Tang Chenc5c5c9d2015-09-08 15:02:00 -07001801 return memblock_overlaps_region(&memblock.reserved, base, size);
Yinghai Lu95f72d12010-07-12 14:36:09 +10001802}
1803
Yinghai Lu6ede1fd2012-10-22 16:35:18 -07001804void __init_memblock memblock_trim_memory(phys_addr_t align)
1805{
Yinghai Lu6ede1fd2012-10-22 16:35:18 -07001806 phys_addr_t start, end, orig_start, orig_end;
Emil Medve136199f2014-04-07 15:37:52 -07001807 struct memblock_region *r;
Yinghai Lu6ede1fd2012-10-22 16:35:18 -07001808
Emil Medve136199f2014-04-07 15:37:52 -07001809 for_each_memblock(memory, r) {
1810 orig_start = r->base;
1811 orig_end = r->base + r->size;
Yinghai Lu6ede1fd2012-10-22 16:35:18 -07001812 start = round_up(orig_start, align);
1813 end = round_down(orig_end, align);
1814
1815 if (start == orig_start && end == orig_end)
1816 continue;
1817
1818 if (start < end) {
Emil Medve136199f2014-04-07 15:37:52 -07001819 r->base = start;
1820 r->size = end - start;
Yinghai Lu6ede1fd2012-10-22 16:35:18 -07001821 } else {
Emil Medve136199f2014-04-07 15:37:52 -07001822 memblock_remove_region(&memblock.memory,
1823 r - memblock.memory.regions);
1824 r--;
Yinghai Lu6ede1fd2012-10-22 16:35:18 -07001825 }
1826 }
1827}
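/*
 * Illustrative example (not part of the original file): with align set to
 * 0x1000, a region [0x1234, 0x5000) is trimmed to [0x2000, 0x5000), and a
 * region that does not contain a full aligned chunk is removed entirely.
 * A typical caller trims to the page size early during setup:
 *
 *	memblock_trim_memory(PAGE_SIZE);
 */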
Benjamin Herrenschmidte63075a2010-07-06 15:39:01 -07001828
Yinghai Lu3661ca62010-09-15 13:05:29 -07001829void __init_memblock memblock_set_current_limit(phys_addr_t limit)
Benjamin Herrenschmidte63075a2010-07-06 15:39:01 -07001830{
1831 memblock.current_limit = limit;
1832}
1833
Laura Abbottfec51012014-02-27 01:23:43 +01001834phys_addr_t __init_memblock memblock_get_current_limit(void)
1835{
1836 return memblock.current_limit;
1837}
1838
Heiko Carstens0262d9c2017-02-24 14:55:59 -08001839static void __init_memblock memblock_dump(struct memblock_type *type)
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001840{
Miles Chen5d63f812017-02-22 15:46:42 -08001841 phys_addr_t base, end, size;
Mike Rapoporte1720fe2018-06-30 17:55:01 +03001842 enum memblock_flags flags;
Alexander Kuleshov8c9c1702016-01-14 15:20:42 -08001843 int idx;
1844 struct memblock_region *rgn;
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001845
Heiko Carstens0262d9c2017-02-24 14:55:59 -08001846 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001847
Gioh Kim66e8b432017-11-15 17:33:42 -08001848 for_each_memblock_type(idx, type, rgn) {
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001849 char nid_buf[32] = "";
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001850
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001851 base = rgn->base;
1852 size = rgn->size;
Miles Chen5d63f812017-02-22 15:46:42 -08001853 end = base + size - 1;
Tang Chen66a20752014-01-21 15:49:20 -08001854 flags = rgn->flags;
Tejun Heo7c0caeb2011-07-14 11:43:42 +02001855#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1856 if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1857 snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1858 memblock_get_region_node(rgn));
1859#endif
Mike Rapoporte1720fe2018-06-30 17:55:01 +03001860 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
Heiko Carstens0262d9c2017-02-24 14:55:59 -08001861 type->name, idx, &base, &end, &size, nid_buf, flags);
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001862 }
1863}
1864
Tejun Heo4ff7b822011-12-08 10:22:06 -08001865void __init_memblock __memblock_dump_all(void)
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001866{
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001867 pr_info("MEMBLOCK configuration:\n");
Miles Chen5d63f812017-02-22 15:46:42 -08001868 pr_info(" memory size = %pa reserved size = %pa\n",
1869 &memblock.memory.total_size,
1870 &memblock.reserved.total_size);
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001871
Heiko Carstens0262d9c2017-02-24 14:55:59 -08001872 memblock_dump(&memblock.memory);
1873 memblock_dump(&memblock.reserved);
Heiko Carstens409efd42017-02-24 14:55:56 -08001874#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
Heiko Carstens0262d9c2017-02-24 14:55:59 -08001875 memblock_dump(&memblock.physmem);
Heiko Carstens409efd42017-02-24 14:55:56 -08001876#endif
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001877}
1878
Tejun Heo1aadc052011-12-08 10:22:08 -08001879void __init memblock_allow_resize(void)
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001880{
Benjamin Herrenschmidt142b45a2010-07-06 15:39:13 -07001881 memblock_can_resize = 1;
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001882}
1883
Benjamin Herrenschmidt6ed311b2010-07-12 14:36:48 +10001884static int __init early_memblock(char *p)
1885{
1886 if (p && strstr(p, "debug"))
1887 memblock_debug = 1;
1888 return 0;
1889}
1890early_param("memblock", early_memblock);
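/*
 * Usage note (not part of the original file): passing "memblock=debug" on
 * the kernel command line sets memblock_debug, which turns on the
 * memblock_dbg() messages emitted by the allocation and free paths above.
 */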
1891
Mike Rapoportbda49a82018-10-30 15:09:40 -07001892static void __init __free_pages_memory(unsigned long start, unsigned long end)
1893{
1894 int order;
1895
1896 while (start < end) {
1897 order = min(MAX_ORDER - 1UL, __ffs(start));
1898
1899 while (start + (1UL << order) > end)
1900 order--;
1901
1902 memblock_free_pages(pfn_to_page(start), start, order);
1903
1904 start += (1UL << order);
1905 }
1906}
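/*
 * Worked example (hypothetical pfns, assuming MAX_ORDER == 11): freeing the
 * pfn range [3, 16) proceeds as pfn 3 at order 0 (__ffs(3) == 0), pfns
 * [4, 8) at order 2, and pfns [8, 16) at order 3; the inner loop lowers the
 * order whenever a block would reach past the end of the range.
 */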
1907
1908static unsigned long __init __free_memory_core(phys_addr_t start,
1909 phys_addr_t end)
1910{
1911 unsigned long start_pfn = PFN_UP(start);
1912 unsigned long end_pfn = min_t(unsigned long,
1913 PFN_DOWN(end), max_low_pfn);
1914
1915 if (start_pfn >= end_pfn)
1916 return 0;
1917
1918 __free_pages_memory(start_pfn, end_pfn);
1919
1920 return end_pfn - start_pfn;
1921}
1922
1923static unsigned long __init free_low_memory_core_early(void)
1924{
1925 unsigned long count = 0;
1926 phys_addr_t start, end;
1927 u64 i;
1928
1929 memblock_clear_hotplug(0, -1);
1930
1931 for_each_reserved_mem_region(i, &start, &end)
1932 reserve_bootmem_region(start, end);
1933
1934 /*
1935 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
 1936 * because in some cases, such as when Node0 has no RAM installed,
 1937 * the low memory will be on Node1
1938 */
1939 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
1940 NULL)
1941 count += __free_memory_core(start, end);
1942
1943 return count;
1944}
1945
1946static int reset_managed_pages_done __initdata;
1947
1948void reset_node_managed_pages(pg_data_t *pgdat)
1949{
1950 struct zone *z;
1951
1952 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
Arun KS9705bea2018-12-28 00:34:24 -08001953 atomic_long_set(&z->managed_pages, 0);
Mike Rapoportbda49a82018-10-30 15:09:40 -07001954}
1955
1956void __init reset_all_zones_managed_pages(void)
1957{
1958 struct pglist_data *pgdat;
1959
1960 if (reset_managed_pages_done)
1961 return;
1962
1963 for_each_online_pgdat(pgdat)
1964 reset_node_managed_pages(pgdat);
1965
1966 reset_managed_pages_done = 1;
1967}
1968
1969/**
1970 * memblock_free_all - release free pages to the buddy allocator
1971 *
1972 * Return: the number of pages actually released.
1973 */
1974unsigned long __init memblock_free_all(void)
1975{
1976 unsigned long pages;
1977
1978 reset_all_zones_managed_pages();
1979
1980 pages = free_low_memory_core_early();
Arun KSca79b0c2018-12-28 00:34:29 -08001981 totalram_pages_add(pages);
Mike Rapoportbda49a82018-10-30 15:09:40 -07001982
1983 return pages;
1984}
1985
Mike Rapoport350e88b2019-05-13 17:22:59 -07001986#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -07001987
1988static int memblock_debug_show(struct seq_file *m, void *private)
1989{
1990 struct memblock_type *type = m->private;
1991 struct memblock_region *reg;
1992 int i;
Miles Chen5d63f812017-02-22 15:46:42 -08001993 phys_addr_t end;
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -07001994
1995 for (i = 0; i < type->cnt; i++) {
1996 reg = &type->regions[i];
Miles Chen5d63f812017-02-22 15:46:42 -08001997 end = reg->base + reg->size - 1;
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -07001998
Miles Chen5d63f812017-02-22 15:46:42 -08001999 seq_printf(m, "%4d: ", i);
2000 seq_printf(m, "%pa..%pa\n", &reg->base, &end);
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -07002001 }
2002 return 0;
2003}
Andy Shevchenko5ad35092018-04-05 16:23:16 -07002004DEFINE_SHOW_ATTRIBUTE(memblock_debug);
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -07002005
2006static int __init memblock_init_debugfs(void)
2007{
2008 struct dentry *root = debugfs_create_dir("memblock", NULL);
Greg Kroah-Hartmand9f79792019-03-05 15:46:09 -08002009
Joe Perches0825a6f2018-06-14 15:27:58 -07002010 debugfs_create_file("memory", 0444, root,
2011 &memblock.memory, &memblock_debug_fops);
2012 debugfs_create_file("reserved", 0444, root,
2013 &memblock.reserved, &memblock_debug_fops);
Philipp Hachtmann70210ed2014-01-29 18:16:01 +01002014#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
Joe Perches0825a6f2018-06-14 15:27:58 -07002015 debugfs_create_file("physmem", 0444, root,
2016 &memblock.physmem, &memblock_debug_fops);
Philipp Hachtmann70210ed2014-01-29 18:16:01 +01002017#endif
Benjamin Herrenschmidt6d03b882010-07-06 15:39:19 -07002018
2019 return 0;
2020}
2021__initcall(memblock_init_debugfs);
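/*
 * Usage note (not part of the original file): with CONFIG_DEBUG_FS enabled,
 * the registered files can be read at runtime, e.g.
 * /sys/kernel/debug/memblock/memory, producing one "index: start..end" line
 * per region in the format printed by memblock_debug_show() above (the
 * addresses below are made up):
 *
 *	   0: 0x0000000080000000..0x00000000ffffffff
 */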
2022
2023#endif /* CONFIG_DEBUG_FS */