/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
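
/*
 * Worked example (illustrative values, not from the original source):
 * with a 64-bit phys_addr_t, base = 0xffffffffffff0000 and
 * *size = 0x20000, memblock_cap_size() trims *size to 0xffff
 * (ULLONG_MAX - base), so base + *size lands exactly on ULLONG_MAX
 * instead of wrapping past zero.
 */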

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
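
/*
 * The overlap test uses half-open intervals, so regions that merely touch
 * do not overlap. Illustrative values (not from the original file):
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000) == 0
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000) != 0
 *
 * because [0x1000,0x2000) and [0x2000,0x3000) merely abut, while
 * [0x1000,0x2000) and [0x1800,0x2800) share [0x1800,0x2000).
 */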

/*
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(); finds a free area
 * top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image. Otherwise, it will be trimmed. The reason
 * is that we want bottom-up allocations close to the kernel image, so it
 * is highly likely that the allocated memory and the kernel will reside
 * on the same node.
 *
 * If the bottom-up allocation fails, memory is allocated top-down instead.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to get a stack trace if it
		 * does happen.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
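
/*
 * Minimal usage sketch (illustrative; SZ_1M/SZ_4K and the chosen limits
 * are assumptions, not taken from this file):
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range_node(SZ_1M, SZ_4K, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE, MEMBLOCK_NONE);
 *
 * A zero return means no suitable area was found. Note that finding a
 * range does not claim it; callers must still memblock_reserve() it.
 */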

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we use MEMBLOCK for the allocation. That means
	 * that this is unsafe to use when bootmem is currently active
	 * (unless bootmem itself is implemented on top of MEMBLOCK, which
	 * isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]\n",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
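
/*
 * Merge example (hypothetical regions): [0x1000,0x2000) followed by
 * [0x2000,0x3000) with matching nid and flags collapses into a single
 * [0x1000,0x3000) entry; if the flags differ (say one region is marked
 * MEMBLOCK_HOTPLUG), the two are deliberately kept separate.
 */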

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     0UL, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

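/*
 * Typical early-boot usage sketch (illustrative address and size; real
 * callers are arch code parsing the e820 map or the device tree):
 *
 *	memblock_add(0x00100000, 0x3ff00000);
 *
 * This registers roughly 1GB of usable RAM starting at 1MB with no node
 * information (MAX_NUMNODES); NUMA-aware callers use memblock_add_node().
 */
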
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
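
/*
 * Isolation example (hypothetical): with a single region [0x1000,0x5000)
 * and a request for [0x2000,0x4000), the region is split into
 * [0x1000,0x2000), [0x2000,0x4000) and [0x4000,0x5000); *start_rgn and
 * *end_rgn then bracket just the middle entry, so callers such as
 * memblock_remove_range() can drop or re-flag exactly the requested span.
 */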

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     0UL, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

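/*
 * Reservation sketch (illustrative; the symbols are placeholders, not
 * defined in this file): early boot code typically protects the kernel
 * image and the initrd before any other allocations happen:
 *
 *	memblock_reserve(__pa_symbol(_text), _end - _text);
 *	memblock_reserve(initrd_start_phys, initrd_size);
 *
 * Reserving a range that partly overlaps an existing reservation is
 * fine; memblock_add_range() only records the uncovered pieces.
 */
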
/**
 * memblock_setclr_flag - set or clear a flag on a memory region
 * @base: the base phys addr of the region
 * @size: the size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to set or clear
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}
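
/*
 * Flag-marking sketch (hypothetical range and names): firmware-owned
 * memory that must stay out of the linear mapping can be recorded as
 *
 *	memblock_add(fw_base, fw_size);
 *	memblock_mark_nomap(fw_base, fw_size);
 *
 * after which the free-range iterators skip it unless MEMBLOCK_NOMAP is
 * explicitly requested in @flags.
 */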

/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
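
/*
 * The iterator above is normally driven through the
 * for_each_reserved_mem_region() wrapper, e.g. (sketch):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 *
 * Note that @out_end here is inclusive (base + size - 1), unlike the
 * exclusive range ends produced by __next_mem_range() below.
 */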
| 847 | |
| 848 | /** |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 849 | * __next__mem_range - next function for for_each_free_mem_range() etc. |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 850 | * @idx: pointer to u64 loop variable |
Grygorii Strashko | b115423 | 2014-01-21 15:50:16 -0800 | [diff] [blame] | 851 | * @nid: node selector, %NUMA_NO_NODE for all nodes |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 852 | * @flags: pick from blocks based on memory attributes |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 853 | * @type_a: pointer to memblock_type from where the range is taken |
| 854 | * @type_b: pointer to memblock_type which excludes memory from being taken |
Wanpeng Li | dad7557 | 2012-06-20 12:53:01 -0700 | [diff] [blame] | 855 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 856 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 857 | * @out_nid: ptr to int for nid of the range, can be %NULL |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 858 | * |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 859 | * Find the first area from *@idx which matches @nid, fill the out |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 860 | * parameters, and update *@idx for the next iteration. The lower 32bit of |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 861 | * *@idx contains index into type_a and the upper 32bit indexes the |
| 862 | * areas before each region in type_b. For example, if type_b regions |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 863 | * look like the following, |
| 864 | * |
| 865 | * 0:[0-16), 1:[32-48), 2:[128-130) |
| 866 | * |
| 867 | * The upper 32bit indexes the following regions. |
| 868 | * |
| 869 | * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) |
| 870 | * |
| 871 | * As both region arrays are sorted, the function advances the two indices |
| 872 | * in lockstep and returns each intersection. |
| 873 | */ |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 874 | void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags, |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 875 | struct memblock_type *type_a, |
| 876 | struct memblock_type *type_b, |
| 877 | phys_addr_t *out_start, |
| 878 | phys_addr_t *out_end, int *out_nid) |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 879 | { |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 880 | int idx_a = *idx & 0xffffffff; |
| 881 | int idx_b = *idx >> 32; |
Grygorii Strashko | b115423 | 2014-01-21 15:50:16 -0800 | [diff] [blame] | 882 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 883 | if (WARN_ONCE(nid == MAX_NUMNODES, |
| 884 | "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
Grygorii Strashko | 560dca27 | 2014-01-21 15:50:55 -0800 | [diff] [blame] | 885 | nid = NUMA_NO_NODE; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 886 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 887 | for (; idx_a < type_a->cnt; idx_a++) { |
| 888 | struct memblock_region *m = &type_a->regions[idx_a]; |
| 889 | |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 890 | phys_addr_t m_start = m->base; |
| 891 | phys_addr_t m_end = m->base + m->size; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 892 | int m_nid = memblock_get_region_node(m); |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 893 | |
| 894 | /* only memory regions are associated with nodes, check it */ |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 895 | if (nid != NUMA_NO_NODE && nid != m_nid) |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 896 | continue; |
| 897 | |
Xishi Qiu | 0a313a9 | 2014-09-09 14:50:46 -0700 | [diff] [blame] | 898 | /* skip hotpluggable memory regions if needed */ |
| 899 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) |
| 900 | continue; |
| 901 | |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 902 | /* if we want mirror memory skip non-mirror memory regions */ |
| 903 | if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) |
| 904 | continue; |
| 905 | |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 906 | /* skip nomap memory unless we were asked for it explicitly */ |
| 907 | if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) |
| 908 | continue; |
| 909 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 910 | if (!type_b) { |
| 911 | if (out_start) |
| 912 | *out_start = m_start; |
| 913 | if (out_end) |
| 914 | *out_end = m_end; |
| 915 | if (out_nid) |
| 916 | *out_nid = m_nid; |
| 917 | idx_a++; |
| 918 | *idx = (u32)idx_a | (u64)idx_b << 32; |
| 919 | return; |
| 920 | } |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 921 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 922 | /* scan areas before each reservation */ |
| 923 | for (; idx_b < type_b->cnt + 1; idx_b++) { |
| 924 | struct memblock_region *r; |
| 925 | phys_addr_t r_start; |
| 926 | phys_addr_t r_end; |
| 927 | |
| 928 | r = &type_b->regions[idx_b]; |
| 929 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
| 930 | r_end = idx_b < type_b->cnt ? |
| 931 | r->base : ULLONG_MAX; |
| 932 | |
| 933 | /* |
| 934 | * if idx_b advanced past the end of the
| 935 | * current type_a region, break out to advance idx_a
| 936 | */ |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 937 | if (r_start >= m_end) |
| 938 | break; |
| 939 | /* if the two regions intersect, we're done */ |
| 940 | if (m_start < r_end) { |
| 941 | if (out_start) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 942 | *out_start = |
| 943 | max(m_start, r_start); |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 944 | if (out_end) |
| 945 | *out_end = min(m_end, r_end); |
| 946 | if (out_nid) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 947 | *out_nid = m_nid; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 948 | /* |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 949 | * The region which ends first is |
| 950 | * advanced for the next iteration. |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 951 | */ |
| 952 | if (m_end <= r_end) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 953 | idx_a++; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 954 | else |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 955 | idx_b++; |
| 956 | *idx = (u32)idx_a | (u64)idx_b << 32; |
Tejun Heo | 35fd080 | 2011-07-12 11:15:59 +0200 | [diff] [blame] | 957 | return; |
| 958 | } |
| 959 | } |
| 960 | } |
| 961 | |
| 962 | /* signal end of iteration */ |
| 963 | *idx = ULLONG_MAX; |
| 964 | } |
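/*
 * Illustrative sketch (not part of the kernel source): the packed 64-bit
 * cursor used by __next_mem_range() and __next_mem_range_rev(). The helper
 * names are hypothetical; they only make the encoding explicit.
 */
static inline u64 example_pack_idx(int idx_a, int idx_b)
{
	/* low 32 bits walk type_a, high 32 bits walk the gaps in type_b */
	return (u32)idx_a | (u64)idx_b << 32;
}

static inline void example_unpack_idx(u64 idx, int *idx_a, int *idx_b)
{
	*idx_a = idx & 0xffffffff;
	*idx_b = idx >> 32;
}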
| 965 | |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 966 | /** |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 967 | * __next_mem_range_rev - generic next function for for_each_*_range_rev() |
| 968 | *
| 969 | * @idx: pointer to u64 loop variable
| 970 | * @nid: node selector, %NUMA_NO_NODE for all nodes
| 971 | * @flags: pick from blocks based on memory attributes
| 972 | * @type_a: pointer to memblock_type from where the range is taken
| 973 | * @type_b: pointer to memblock_type which excludes memory from being taken
| 974 | * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
| 975 | * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
| 976 | * @out_nid: ptr to int for nid of the range, can be %NULL
| 977 | *
| 978 | * Finds the next range from type_a which is not marked as unsuitable
| 979 | * in type_b. Reverse of __next_mem_range().
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 982 | */ |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 983 | void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags, |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 984 | struct memblock_type *type_a, |
| 985 | struct memblock_type *type_b, |
| 986 | phys_addr_t *out_start, |
| 987 | phys_addr_t *out_end, int *out_nid) |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 988 | { |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 989 | int idx_a = *idx & 0xffffffff; |
| 990 | int idx_b = *idx >> 32; |
Grygorii Strashko | b115423 | 2014-01-21 15:50:16 -0800 | [diff] [blame] | 991 | |
Grygorii Strashko | 560dca27 | 2014-01-21 15:50:55 -0800 | [diff] [blame] | 992 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
| 993 | nid = NUMA_NO_NODE; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 994 | |
| 995 | if (*idx == (u64)ULLONG_MAX) { |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 996 | idx_a = type_a->cnt - 1; |
| 997 | idx_b = type_b->cnt; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 998 | } |
| 999 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1000 | for (; idx_a >= 0; idx_a--) { |
| 1001 | struct memblock_region *m = &type_a->regions[idx_a]; |
| 1002 | |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1003 | phys_addr_t m_start = m->base; |
| 1004 | phys_addr_t m_end = m->base + m->size; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1005 | int m_nid = memblock_get_region_node(m); |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1006 | |
| 1007 | /* only memory regions are associated with nodes; check the nid */
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1008 | if (nid != NUMA_NO_NODE && nid != m_nid) |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1009 | continue; |
| 1010 | |
Tang Chen | 55ac590 | 2014-01-21 15:49:35 -0800 | [diff] [blame] | 1011 | /* skip hotpluggable memory regions if needed */ |
| 1012 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) |
| 1013 | continue; |
| 1014 | |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 1015 | /* if we want mirror memory skip non-mirror memory regions */ |
| 1016 | if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) |
| 1017 | continue; |
| 1018 | |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 1019 | /* skip nomap memory unless we were asked for it explicitly */ |
| 1020 | if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) |
| 1021 | continue; |
| 1022 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1023 | if (!type_b) { |
| 1024 | if (out_start) |
| 1025 | *out_start = m_start; |
| 1026 | if (out_end) |
| 1027 | *out_end = m_end; |
| 1028 | if (out_nid) |
| 1029 | *out_nid = m_nid; |
zijun_hu | fb399b4 | 2016-07-28 15:48:56 -0700 | [diff] [blame] | 1030 | idx_a--; |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1031 | *idx = (u32)idx_a | (u64)idx_b << 32; |
| 1032 | return; |
| 1033 | } |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1034 | |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1035 | /* scan areas before each reservation */ |
| 1036 | for (; idx_b >= 0; idx_b--) { |
| 1037 | struct memblock_region *r; |
| 1038 | phys_addr_t r_start; |
| 1039 | phys_addr_t r_end; |
| 1040 | |
| 1041 | r = &type_b->regions[idx_b]; |
| 1042 | r_start = idx_b ? r[-1].base + r[-1].size : 0; |
| 1043 | r_end = idx_b < type_b->cnt ? |
| 1044 | r->base : ULLONG_MAX; |
| 1045 | /* |
| 1046 | * if idx_b moved past the start of the
| 1047 | * current type_a region, break out to advance idx_a
| 1048 | */
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1050 | if (r_end <= m_start) |
| 1051 | break; |
| 1052 | /* if the two regions intersect, we're done */ |
| 1053 | if (m_end > r_start) { |
| 1054 | if (out_start) |
| 1055 | *out_start = max(m_start, r_start); |
| 1056 | if (out_end) |
| 1057 | *out_end = min(m_end, r_end); |
| 1058 | if (out_nid) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1059 | *out_nid = m_nid; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1060 | if (m_start >= r_start) |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1061 | idx_a--; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1062 | else |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1063 | idx_b--; |
| 1064 | *idx = (u32)idx_a | (u64)idx_b << 32; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1065 | return; |
| 1066 | } |
| 1067 | } |
| 1068 | } |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1069 | /* signal end of iteration */ |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1070 | *idx = ULLONG_MAX; |
| 1071 | } |
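/*
 * Usage sketch (hypothetical caller): walking free memory top-down via
 * for_each_free_mem_range_reverse() from <linux/memblock.h>, which drives
 * __next_mem_range_rev() with type_a = memory and type_b = reserved.
 */
static phys_addr_t __init example_highest_free_base(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
					&start, &end, NULL)
		return start;	/* first range returned is the highest */
	return 0;
}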
| 1072 | |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1073 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 1074 | /* |
| 1075 | * Common iterator interface used to define for_each_mem_pfn_range().
| 1076 | */ |
| 1077 | void __init_memblock __next_mem_pfn_range(int *idx, int nid, |
| 1078 | unsigned long *out_start_pfn, |
| 1079 | unsigned long *out_end_pfn, int *out_nid) |
| 1080 | { |
| 1081 | struct memblock_type *type = &memblock.memory; |
| 1082 | struct memblock_region *r; |
| 1083 | |
| 1084 | while (++*idx < type->cnt) { |
| 1085 | r = &type->regions[*idx]; |
| 1086 | |
| 1087 | if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) |
| 1088 | continue; |
| 1089 | if (nid == MAX_NUMNODES || nid == r->nid) |
| 1090 | break; |
| 1091 | } |
| 1092 | if (*idx >= type->cnt) { |
| 1093 | *idx = -1; |
| 1094 | return; |
| 1095 | } |
| 1096 | |
| 1097 | if (out_start_pfn) |
| 1098 | *out_start_pfn = PFN_UP(r->base); |
| 1099 | if (out_end_pfn) |
| 1100 | *out_end_pfn = PFN_DOWN(r->base + r->size); |
| 1101 | if (out_nid) |
| 1102 | *out_nid = r->nid; |
| 1103 | } |
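/*
 * Usage sketch (hypothetical caller): counting the present pages of a node
 * with for_each_mem_pfn_range(), the <linux/memblock.h> wrapper around
 * __next_mem_pfn_range() above.
 */
static unsigned long __init example_present_pages(int nid)
{
	unsigned long start_pfn, end_pfn, pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		pages += end_pfn - start_pfn;
	return pages;
}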
| 1104 | |
| 1105 | /** |
| 1106 | * memblock_set_node - set node ID on memblock regions |
| 1107 | * @base: base of area to set node ID for |
| 1108 | * @size: size of area to set node ID for |
Tang Chen | e7e8de5 | 2014-01-21 15:49:26 -0800 | [diff] [blame] | 1109 | * @type: memblock type to set node ID for |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1110 | * @nid: node ID to set |
| 1111 | * |
Tang Chen | e7e8de5 | 2014-01-21 15:49:26 -0800 | [diff] [blame] | 1112 | * Set the nid of memblock @type regions in [@base,@base+@size) to @nid. |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1113 | * Regions which cross the area boundaries are split as necessary. |
| 1114 | * |
| 1115 | * RETURNS: |
| 1116 | * 0 on success, -errno on failure. |
| 1117 | */ |
| 1118 | int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, |
Tang Chen | e7e8de5 | 2014-01-21 15:49:26 -0800 | [diff] [blame] | 1119 | struct memblock_type *type, int nid) |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1120 | { |
Tejun Heo | 6a9ceb3 | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1121 | int start_rgn, end_rgn; |
| 1122 | int i, ret; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1123 | |
Tejun Heo | 6a9ceb3 | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1124 | ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); |
| 1125 | if (ret) |
| 1126 | return ret; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1127 | |
Tejun Heo | 6a9ceb3 | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1128 | for (i = start_rgn; i < end_rgn; i++) |
Wanpeng Li | e9d24ad | 2012-10-08 16:32:21 -0700 | [diff] [blame] | 1129 | memblock_set_region_node(&type->regions[i], nid); |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1130 | |
| 1131 | memblock_merge_regions(type); |
| 1132 | return 0; |
| 1133 | } |
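/*
 * Usage sketch (hypothetical caller): early NUMA detection code such as
 * SRAT or device-tree parsing binds a physical range to a node like this;
 * regions crossing the boundaries are split first and equal-nid
 * neighbours are merged afterwards.
 */
static int __init example_bind_range_to_node(phys_addr_t base,
					     phys_addr_t size, int nid)
{
	return memblock_set_node(base, size, &memblock.memory, nid);
}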
| 1134 | #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
| 1135 | |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1136 | static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, |
| 1137 | phys_addr_t align, phys_addr_t start, |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1138 | phys_addr_t end, int nid, ulong flags) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1139 | { |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1140 | phys_addr_t found; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1141 | |
Grygorii Strashko | 79f40fa | 2014-01-21 15:50:12 -0800 | [diff] [blame] | 1142 | if (!align) |
| 1143 | align = SMP_CACHE_BYTES; |
Vineet Gupta | 94f3d3a | 2013-04-29 15:06:15 -0700 | [diff] [blame] | 1144 | |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1145 | found = memblock_find_in_range_node(size, align, start, end, nid, |
| 1146 | flags); |
Catalin Marinas | aedf95e | 2014-06-06 14:38:20 -0700 | [diff] [blame] | 1147 | if (found && !memblock_reserve(found, size)) { |
| 1148 | /* |
| 1149 | * The min_count is set to 0 so that memblock allocations are |
| 1150 | * never reported as leaks. |
| 1151 | */ |
| 1152 | kmemleak_alloc(__va(found), size, 0, 0); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1153 | return found; |
Catalin Marinas | aedf95e | 2014-06-06 14:38:20 -0700 | [diff] [blame] | 1154 | } |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1155 | return 0; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1156 | } |
| 1157 | |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1158 | phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1159 | phys_addr_t start, phys_addr_t end, |
| 1160 | ulong flags) |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1161 | { |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1162 | return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, |
| 1163 | flags); |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1164 | } |
| 1165 | |
| 1166 | static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size, |
| 1167 | phys_addr_t align, phys_addr_t max_addr, |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1168 | int nid, ulong flags) |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1169 | { |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1170 | return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags); |
Akinobu Mita | 2bfc286 | 2014-06-04 16:06:53 -0700 | [diff] [blame] | 1171 | } |
| 1172 | |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1173 | phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) |
| 1174 | { |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 1175 | ulong flags = choose_memblock_flags(); |
| 1176 | phys_addr_t ret; |
| 1177 | |
| 1178 | again: |
| 1179 | ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, |
| 1180 | nid, flags); |
| 1181 | |
| 1182 | if (!ret && (flags & MEMBLOCK_MIRROR)) { |
| 1183 | flags &= ~MEMBLOCK_MIRROR; |
| 1184 | goto again; |
| 1185 | } |
| 1186 | return ret; |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1187 | } |
| 1188 | |
| 1189 | phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
| 1190 | { |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1191 | return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE, |
| 1192 | MEMBLOCK_NONE); |
Tejun Heo | 7bd0b0f | 2011-12-08 10:22:09 -0800 | [diff] [blame] | 1193 | } |
| 1194 | |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1195 | phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1196 | { |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1197 | phys_addr_t alloc; |
| 1198 | |
| 1199 | alloc = __memblock_alloc_base(size, align, max_addr); |
| 1200 | |
| 1201 | if (alloc == 0) |
| 1202 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", |
| 1203 | (unsigned long long) size, (unsigned long long) max_addr); |
| 1204 | |
| 1205 | return alloc; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1206 | } |
| 1207 | |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1208 | phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1209 | { |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1210 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1211 | } |
| 1212 | |
Benjamin Herrenschmidt | 9d1e249 | 2010-07-06 15:39:17 -0700 | [diff] [blame] | 1213 | phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
| 1214 | { |
| 1215 | phys_addr_t res = memblock_alloc_nid(size, align, nid); |
| 1216 | |
| 1217 | if (res) |
| 1218 | return res; |
Tejun Heo | 15fb097 | 2011-07-12 09:58:07 +0200 | [diff] [blame] | 1219 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1220 | } |
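/*
 * Usage sketch (hypothetical caller): per-node early tables want memory on
 * their own node but must not fail outright; memblock_alloc_try_nid()
 * encodes exactly that "try @nid, then anywhere accessible" fallback.
 */
static phys_addr_t __init example_pernode_table(phys_addr_t size, int nid)
{
	return memblock_alloc_try_nid(size, PAGE_SIZE, nid);
}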
| 1221 | |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1222 | /** |
| 1223 | * memblock_virt_alloc_internal - allocate boot memory block |
| 1224 | * @size: size of memory block to be allocated in bytes |
| 1225 | * @align: alignment of the region and block's size |
| 1226 | * @min_addr: the lower bound of the memory region to allocate (phys address) |
| 1227 | * @max_addr: the upper bound of the memory region to allocate (phys address) |
| 1228 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1229 | * |
| 1230 | * The @min_addr limit is dropped if it cannot be satisfied and the allocation
| 1231 | * will fall back to memory below @min_addr. Also, the allocation may fall back
| 1232 | * to any node in the system if the specified node cannot
| 1233 | * hold the requested memory.
| 1234 | * |
| 1235 | * The allocation is performed from memory region limited by |
| 1236 | * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. |
| 1237 | * |
| 1238 | * The memory block is aligned on SMP_CACHE_BYTES if @align == 0. |
| 1239 | * |
| 1240 | * The phys address of allocated boot memory block is converted to virtual and |
| 1241 | * allocated memory is reset to 0. |
| 1242 | * |
| 1243 | * In addition, the function sets min_count to 0 using kmemleak_alloc() for the
| 1244 | * allocated boot memory block, so that it is never reported as a leak.
| 1245 | * |
| 1246 | * RETURNS: |
| 1247 | * Virtual address of allocated memory block on success, NULL on failure. |
| 1248 | */ |
| 1249 | static void * __init memblock_virt_alloc_internal( |
| 1250 | phys_addr_t size, phys_addr_t align, |
| 1251 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 1252 | int nid) |
| 1253 | { |
| 1254 | phys_addr_t alloc; |
| 1255 | void *ptr; |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 1256 | ulong flags = choose_memblock_flags(); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1257 | |
Grygorii Strashko | 560dca27 | 2014-01-21 15:50:55 -0800 | [diff] [blame] | 1258 | if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) |
| 1259 | nid = NUMA_NO_NODE; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1260 | |
| 1261 | /* |
| 1262 | * Detect any accidental use of these APIs after slab is ready, as at |
| 1263 | * this moment memblock may be deinitialized already and its |
| 1264 | * internal data may be destroyed (after execution of free_all_bootmem) |
| 1265 | */ |
| 1266 | if (WARN_ON_ONCE(slab_is_available())) |
| 1267 | return kzalloc_node(size, GFP_NOWAIT, nid); |
| 1268 | |
| 1269 | if (!align) |
| 1270 | align = SMP_CACHE_BYTES; |
| 1271 | |
Yinghai Lu | f544e14 | 2014-01-29 14:05:52 -0800 | [diff] [blame] | 1272 | if (max_addr > memblock.current_limit) |
| 1273 | max_addr = memblock.current_limit; |
| 1274 | |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1275 | again: |
| 1276 | alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 1277 | nid, flags); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1278 | if (alloc) |
| 1279 | goto done; |
| 1280 | |
| 1281 | if (nid != NUMA_NO_NODE) { |
| 1282 | alloc = memblock_find_in_range_node(size, align, min_addr, |
Tony Luck | fc6daaf | 2015-06-24 16:58:09 -0700 | [diff] [blame] | 1283 | max_addr, NUMA_NO_NODE, |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 1284 | flags); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1285 | if (alloc) |
| 1286 | goto done; |
| 1287 | } |
| 1288 | |
| 1289 | if (min_addr) { |
| 1290 | min_addr = 0; |
| 1291 | goto again; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1292 | } |
| 1293 | |
Tony Luck | a3f5baf | 2015-06-24 16:58:12 -0700 | [diff] [blame] | 1294 | if (flags & MEMBLOCK_MIRROR) { |
| 1295 | flags &= ~MEMBLOCK_MIRROR; |
| 1296 | pr_warn("Could not allocate %pap bytes of mirrored memory\n", |
| 1297 | &size); |
| 1298 | goto again; |
| 1299 | } |
| 1300 | |
| 1301 | return NULL; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1302 | done: |
| 1303 | memblock_reserve(alloc, size); |
| 1304 | ptr = phys_to_virt(alloc); |
| 1305 | memset(ptr, 0, size); |
| 1306 | |
| 1307 | /* |
| 1308 | * The min_count is set to 0 so that bootmem allocated blocks |
| 1309 | * are never reported as leaks. This is because many of these blocks |
| 1310 | * are only referred via the physical address which is not |
| 1311 | * looked up by kmemleak. |
| 1312 | */ |
| 1313 | kmemleak_alloc(ptr, size, 0, 0); |
| 1314 | |
| 1315 | return ptr; |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1316 | } |
| 1317 | |
| 1318 | /** |
| 1319 | * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block |
| 1320 | * @size: size of memory block to be allocated in bytes |
| 1321 | * @align: alignment of the region and block's size |
| 1322 | * @min_addr: the lower bound of the memory region from where the allocation |
| 1323 | * is preferred (phys address) |
| 1324 | * @max_addr: the upper bound of the memory region from where the allocation |
| 1325 | * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to |
| 1326 | * allocate only from memory limited by memblock.current_limit value |
| 1327 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1328 | * |
| 1329 | * Public wrapper of memblock_virt_alloc_internal() which provides
| 1330 | * additional debug information (including caller info), if enabled.
| 1331 | * |
| 1332 | * RETURNS: |
| 1333 | * Virtual address of allocated memory block on success, NULL on failure. |
| 1334 | */ |
| 1335 | void * __init memblock_virt_alloc_try_nid_nopanic( |
| 1336 | phys_addr_t size, phys_addr_t align, |
| 1337 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 1338 | int nid) |
| 1339 | { |
| 1340 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", |
| 1341 | __func__, (u64)size, (u64)align, nid, (u64)min_addr, |
| 1342 | (u64)max_addr, (void *)_RET_IP_); |
| 1343 | return memblock_virt_alloc_internal(size, align, min_addr, |
| 1344 | max_addr, nid); |
| 1345 | } |
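/*
 * Usage sketch (hypothetical caller): an optional boot-time buffer that the
 * caller can live without uses the nopanic variant and checks for NULL.
 * BOOTMEM_ALLOC_ACCESSIBLE comes from <linux/bootmem.h>.
 */
static void * __init example_optional_buffer(phys_addr_t size)
{
	void *buf = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
					0, BOOTMEM_ALLOC_ACCESSIBLE,
					NUMA_NO_NODE);

	if (!buf)
		pr_warn("optional buffer disabled, no early memory\n");
	return buf;
}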
| 1346 | |
| 1347 | /** |
| 1348 | * memblock_virt_alloc_try_nid - allocate boot memory block with panicking |
| 1349 | * @size: size of memory block to be allocated in bytes |
| 1350 | * @align: alignment of the region and block's size |
| 1351 | * @min_addr: the lower bound of the memory region from where the allocation |
| 1352 | * is preferred (phys address) |
| 1353 | * @max_addr: the upper bound of the memory region from where the allocation |
| 1354 | * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to |
| 1355 | * allocate only from memory limited by memblock.current_limit value |
| 1356 | * @nid: nid of the free area to find, %NUMA_NO_NODE for any node |
| 1357 | * |
| 1358 | * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
| 1359 | * which provides debug information (including caller info), if enabled,
| 1360 | * and panics if the request cannot be satisfied.
| 1361 | * |
| 1362 | * RETURNS: |
| 1363 | * Virtual address of the allocated memory block on success; on failure the
 * function panics and does not return.
| 1364 | */ |
| 1365 | void * __init memblock_virt_alloc_try_nid( |
| 1366 | phys_addr_t size, phys_addr_t align, |
| 1367 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 1368 | int nid) |
| 1369 | { |
| 1370 | void *ptr; |
| 1371 | |
| 1372 | memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n", |
| 1373 | __func__, (u64)size, (u64)align, nid, (u64)min_addr, |
| 1374 | (u64)max_addr, (void *)_RET_IP_); |
| 1375 | ptr = memblock_virt_alloc_internal(size, align, |
| 1376 | min_addr, max_addr, nid); |
| 1377 | if (ptr) |
| 1378 | return ptr; |
| 1379 | |
| 1380 | panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n", |
| 1381 | __func__, (u64)size, (u64)align, nid, (u64)min_addr, |
| 1382 | (u64)max_addr); |
| 1383 | return NULL; |
| 1384 | } |
| 1385 | |
| 1386 | /** |
| 1387 | * __memblock_free_early - free boot memory block |
| 1388 | * @base: phys starting address of the boot memory block |
| 1389 | * @size: size of the boot memory block in bytes |
| 1390 | * |
| 1391 | * Free a boot memory block previously allocated by the memblock_virt_alloc_*() API.
| 1392 | * The freed memory will not be released to the buddy allocator.
| 1393 | */ |
| 1394 | void __init __memblock_free_early(phys_addr_t base, phys_addr_t size) |
| 1395 | { |
| 1396 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", |
| 1397 | __func__, (u64)base, (u64)base + size - 1, |
| 1398 | (void *)_RET_IP_); |
| 1399 | kmemleak_free_part(__va(base), size); |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1400 | memblock_remove_range(&memblock.reserved, base, size); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1401 | } |
| 1402 | |
| 1403 | /**
| 1404 | * __memblock_free_late - free bootmem block pages directly to the buddy allocator
| 1405 | * @base: phys starting address of the boot memory block
| 1406 | * @size: size of the boot memory block in bytes |
| 1407 | * |
| 1408 | * This is only useful when the bootmem allocator has already been torn |
| 1409 | * down, but we are still initializing the system. Pages are released directly |
| 1410 | * to the buddy allocator, no bootmem metadata is updated because it is gone. |
| 1411 | */ |
| 1412 | void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) |
| 1413 | { |
| 1414 | u64 cursor, end; |
| 1415 | |
| 1416 | memblock_dbg("%s: [%#016llx-%#016llx] %pF\n", |
| 1417 | __func__, (u64)base, (u64)base + size - 1, |
| 1418 | (void *)_RET_IP_); |
| 1419 | kmemleak_free_part(__va(base), size); |
| 1420 | cursor = PFN_UP(base); |
| 1421 | end = PFN_DOWN(base + size); |
| 1422 | |
| 1423 | for (; cursor < end; cursor++) { |
Mel Gorman | d70ddd7 | 2015-06-30 14:56:52 -0700 | [diff] [blame] | 1424 | __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); |
Santosh Shilimkar | 26f09e9 | 2014-01-21 15:50:19 -0800 | [diff] [blame] | 1425 | totalram_pages++; |
| 1426 | } |
| 1427 | } |
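/*
 * Usage sketch (hypothetical caller): releasing an early allocation. Before
 * free_all_bootmem() runs, __memblock_free_early() just drops the
 * reservation; once the buddy allocator owns memory, the pages must go to
 * it directly via __memblock_free_late().
 */
static void __init example_release(phys_addr_t base, phys_addr_t size,
				   bool buddy_up)
{
	if (buddy_up)
		__memblock_free_late(base, size);
	else
		__memblock_free_early(base, size);
}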
Benjamin Herrenschmidt | 9d1e249 | 2010-07-06 15:39:17 -0700 | [diff] [blame] | 1428 | |
| 1429 | /* |
| 1430 | * Remaining API functions |
| 1431 | */ |
| 1432 | |
David Gibson | 1f1ffb8a | 2016-02-05 15:36:19 -0800 | [diff] [blame] | 1433 | phys_addr_t __init_memblock memblock_phys_mem_size(void) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1434 | { |
Tejun Heo | 1440c4e | 2011-12-08 10:22:08 -0800 | [diff] [blame] | 1435 | return memblock.memory.total_size; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1436 | } |
| 1437 | |
Yinghai Lu | 595ad9a | 2013-01-24 12:20:09 -0800 | [diff] [blame] | 1438 | phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) |
| 1439 | { |
| 1440 | unsigned long pages = 0; |
| 1441 | struct memblock_region *r; |
| 1442 | unsigned long start_pfn, end_pfn; |
| 1443 | |
| 1444 | for_each_memblock(memory, r) { |
| 1445 | start_pfn = memblock_region_memory_base_pfn(r); |
| 1446 | end_pfn = memblock_region_memory_end_pfn(r); |
| 1447 | start_pfn = min_t(unsigned long, start_pfn, limit_pfn); |
| 1448 | end_pfn = min_t(unsigned long, end_pfn, limit_pfn); |
| 1449 | pages += end_pfn - start_pfn; |
| 1450 | } |
| 1451 | |
Fabian Frederick | 1676323 | 2014-04-07 15:37:53 -0700 | [diff] [blame] | 1452 | return PFN_PHYS(pages); |
Yinghai Lu | 595ad9a | 2013-01-24 12:20:09 -0800 | [diff] [blame] | 1453 | } |
| 1454 | |
Sam Ravnborg | 0a93ebe | 2011-10-31 17:08:16 -0700 | [diff] [blame] | 1455 | /* lowest address */ |
| 1456 | phys_addr_t __init_memblock memblock_start_of_DRAM(void) |
| 1457 | { |
| 1458 | return memblock.memory.regions[0].base; |
| 1459 | } |
| 1460 | |
Yinghai Lu | 10d0643 | 2010-07-28 15:43:02 +1000 | [diff] [blame] | 1461 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1462 | { |
| 1463 | int idx = memblock.memory.cnt - 1; |
| 1464 | |
Benjamin Herrenschmidt | e3239ff | 2010-08-04 14:06:41 +1000 | [diff] [blame] | 1465 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1466 | } |
| 1467 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1468 | static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1469 | { |
Tejun Heo | c0ce8fe | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1470 | phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1471 | struct memblock_region *r; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1472 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1473 | /* |
| 1474 | * translate the memory @limit size into the max address within one of
| 1475 | * the memory memblock regions; if @limit exceeds the total size of
| 1476 | * those regions, max_addr keeps its original value of ULLONG_MAX
| 1477 | */ |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1478 | for_each_memblock(memory, r) { |
Tejun Heo | c0ce8fe | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1479 | if (limit <= r->size) { |
| 1480 | max_addr = r->base + limit; |
| 1481 | break; |
| 1482 | } |
| 1483 | limit -= r->size; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1484 | } |
| 1485 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1486 | return max_addr; |
| 1487 | } |
| 1488 | |
| 1489 | void __init memblock_enforce_memory_limit(phys_addr_t limit) |
| 1490 | { |
| 1491 | phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX; |
| 1492 | |
| 1493 | if (!limit) |
| 1494 | return; |
| 1495 | |
| 1496 | max_addr = __find_max_addr(limit); |
| 1497 | |
| 1498 | /* @limit exceeds the total size of the memory, do nothing */ |
| 1499 | if (max_addr == (phys_addr_t)ULLONG_MAX) |
| 1500 | return; |
| 1501 | |
Tejun Heo | c0ce8fe | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1502 | /* truncate both memory and reserved regions */ |
Philipp Hachtmann | f1af9d3 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1503 | memblock_remove_range(&memblock.memory, max_addr, |
| 1504 | (phys_addr_t)ULLONG_MAX); |
| 1505 | memblock_remove_range(&memblock.reserved, max_addr, |
| 1506 | (phys_addr_t)ULLONG_MAX); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1507 | } |
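/*
 * Usage note (illustrative): architectures implement the "mem=" boot
 * parameter by passing the parsed size here from their setup code, e.g.
 * memblock_enforce_memory_limit(memory_limit).
 */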
| 1508 | |
Dennis Chen | a571d4e | 2016-07-28 15:48:26 -0700 | [diff] [blame] | 1509 | void __init memblock_mem_limit_remove_map(phys_addr_t limit) |
| 1510 | { |
| 1511 | struct memblock_type *type = &memblock.memory; |
| 1512 | phys_addr_t max_addr; |
| 1513 | int i, ret, start_rgn, end_rgn; |
| 1514 | |
| 1515 | if (!limit) |
| 1516 | return; |
| 1517 | |
| 1518 | max_addr = __find_max_addr(limit); |
| 1519 | |
| 1520 | /* @limit exceeds the total size of the memory, do nothing */ |
| 1521 | if (max_addr == (phys_addr_t)ULLONG_MAX) |
| 1522 | return; |
| 1523 | |
| 1524 | ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX, |
| 1525 | &start_rgn, &end_rgn); |
| 1526 | if (ret) |
| 1527 | return; |
| 1528 | |
| 1529 | /* remove all the MAP regions above the limit */ |
| 1530 | for (i = end_rgn - 1; i >= start_rgn; i--) { |
| 1531 | if (!memblock_is_nomap(&type->regions[i])) |
| 1532 | memblock_remove_region(type, i); |
| 1533 | } |
| 1534 | /* truncate the reserved regions */ |
| 1535 | memblock_remove_range(&memblock.reserved, max_addr, |
| 1536 | (phys_addr_t)ULLONG_MAX); |
| 1537 | } |
| 1538 | |
Yinghai Lu | cd79481 | 2010-10-11 12:34:09 -0700 | [diff] [blame] | 1539 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1540 | { |
| 1541 | unsigned int left = 0, right = type->cnt; |
| 1542 | |
| 1543 | do { |
| 1544 | unsigned int mid = (right + left) / 2; |
| 1545 | |
| 1546 | if (addr < type->regions[mid].base) |
| 1547 | right = mid; |
| 1548 | else if (addr >= (type->regions[mid].base + |
| 1549 | type->regions[mid].size)) |
| 1550 | left = mid + 1; |
| 1551 | else |
| 1552 | return mid; |
| 1553 | } while (left < right); |
| 1554 | return -1; |
| 1555 | } |
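/*
 * Worked example (illustrative): memblock_search() is a plain binary search
 * over the sorted, non-overlapping region array. With regions
 * 0:[0-16), 1:[32-48), 2:[128-130), addr 40 probes mid=1, falls inside
 * [32-48) and returns 1; addr 20 narrows the window to empty and
 * returns -1.
 */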
| 1556 | |
Yaowei Bai | b4ad0c7 | 2016-01-14 15:18:54 -0800 | [diff] [blame] | 1557 | bool __init_memblock memblock_is_reserved(phys_addr_t addr)
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1558 | { |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1559 | return memblock_search(&memblock.reserved, addr) != -1; |
| 1560 | } |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1561 | |
Yaowei Bai | b4ad0c7 | 2016-01-14 15:18:54 -0800 | [diff] [blame] | 1562 | bool __init_memblock memblock_is_memory(phys_addr_t addr) |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1563 | { |
| 1564 | return memblock_search(&memblock.memory, addr) != -1; |
| 1565 | } |
| 1566 | |
Ard Biesheuvel | bf3d3cc | 2015-11-30 13:28:15 +0100 | [diff] [blame] | 1567 | int __init_memblock memblock_is_map_memory(phys_addr_t addr) |
| 1568 | { |
| 1569 | int i = memblock_search(&memblock.memory, addr); |
| 1570 | |
| 1571 | if (i == -1) |
| 1572 | return false; |
| 1573 | return !memblock_is_nomap(&memblock.memory.regions[i]); |
| 1574 | } |
| 1575 | |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1576 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 1577 | int __init_memblock memblock_search_pfn_nid(unsigned long pfn, |
| 1578 | unsigned long *start_pfn, unsigned long *end_pfn) |
| 1579 | { |
| 1580 | struct memblock_type *type = &memblock.memory; |
Fabian Frederick | 1676323 | 2014-04-07 15:37:53 -0700 | [diff] [blame] | 1581 | int mid = memblock_search(type, PFN_PHYS(pfn)); |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1582 | |
| 1583 | if (mid == -1) |
| 1584 | return -1; |
| 1585 | |
Fabian Frederick | f7e2f7e | 2014-06-04 16:07:51 -0700 | [diff] [blame] | 1586 | *start_pfn = PFN_DOWN(type->regions[mid].base); |
| 1587 | *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); |
Yinghai Lu | e76b63f | 2013-09-11 14:22:17 -0700 | [diff] [blame] | 1588 | |
| 1589 | return type->regions[mid].nid; |
| 1590 | } |
| 1591 | #endif |
| 1592 | |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1593 | /** |
| 1594 | * memblock_is_region_memory - check if a region is a subset of memory |
| 1595 | * @base: base of region to check |
| 1596 | * @size: size of region to check |
| 1597 | * |
| 1598 | * Check if the region [@base, @base+@size) is a subset of a memory block. |
| 1599 | * |
| 1600 | * RETURNS: |
| 1601 | * 0 if false, non-zero if true |
| 1602 | */ |
Yinghai Lu | 3661ca6 | 2010-09-15 13:05:29 -0700 | [diff] [blame] | 1603 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1604 | { |
Tomi Valkeinen | abb6527 | 2011-01-20 14:44:20 -0800 | [diff] [blame] | 1605 | int idx = memblock_search(&memblock.memory, base); |
Tejun Heo | eb18f1b | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1606 | phys_addr_t end = base + memblock_cap_size(base, &size); |
Benjamin Herrenschmidt | 72d4b0b | 2010-08-04 14:38:47 +1000 | [diff] [blame] | 1607 | |
| 1608 | if (idx == -1) |
| 1609 | return 0; |
Tomi Valkeinen | abb6527 | 2011-01-20 14:44:20 -0800 | [diff] [blame] | 1610 | return memblock.memory.regions[idx].base <= base && |
| 1611 | (memblock.memory.regions[idx].base + |
Tejun Heo | eb18f1b | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1612 | memblock.memory.regions[idx].size) >= end; |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1613 | } |
| 1614 | |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1615 | /** |
| 1616 | * memblock_is_region_reserved - check if a region intersects reserved memory |
| 1617 | * @base: base of region to check |
| 1618 | * @size: size of region to check |
| 1619 | * |
| 1620 | * Check if the region [@base, @base+@size) intersects a reserved memory block. |
| 1621 | * |
| 1622 | * RETURNS: |
Tang Chen | c5c5c9d | 2015-09-08 15:02:00 -0700 | [diff] [blame] | 1623 | * True if they intersect, false if not. |
Stephen Boyd | eab3094 | 2012-05-24 00:45:21 -0700 | [diff] [blame] | 1624 | */ |
Tang Chen | c5c5c9d | 2015-09-08 15:02:00 -0700 | [diff] [blame] | 1625 | bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1626 | { |
Tejun Heo | eb18f1b | 2011-12-08 10:22:07 -0800 | [diff] [blame] | 1627 | memblock_cap_size(base, &size); |
Tang Chen | c5c5c9d | 2015-09-08 15:02:00 -0700 | [diff] [blame] | 1628 | return memblock_overlaps_region(&memblock.reserved, base, size); |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 1629 | } |
| 1630 | |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1631 | void __init_memblock memblock_trim_memory(phys_addr_t align) |
| 1632 | { |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1633 | phys_addr_t start, end, orig_start, orig_end; |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1634 | struct memblock_region *r; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1635 | |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1636 | for_each_memblock(memory, r) { |
| 1637 | orig_start = r->base; |
| 1638 | orig_end = r->base + r->size; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1639 | start = round_up(orig_start, align); |
| 1640 | end = round_down(orig_end, align); |
| 1641 | |
| 1642 | if (start == orig_start && end == orig_end) |
| 1643 | continue; |
| 1644 | |
| 1645 | if (start < end) { |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1646 | r->base = start; |
| 1647 | r->size = end - start; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1648 | } else { |
Emil Medve | 136199f | 2014-04-07 15:37:52 -0700 | [diff] [blame] | 1649 | memblock_remove_region(&memblock.memory, |
| 1650 | r - memblock.memory.regions); |
| 1651 | r--; |
Yinghai Lu | 6ede1fd | 2012-10-22 16:35:18 -0700 | [diff] [blame] | 1652 | } |
| 1653 | } |
| 1654 | } |
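/*
 * Worked example (illustrative): with align = 0x200000 (2 MiB), a region
 * [0x1ff000, 0x5ff000) is trimmed to [0x200000, 0x400000), while a region
 * [0x280000, 0x300000), smaller than the alignment, rounds to an empty
 * range and is removed outright.
 */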
Benjamin Herrenschmidt | e63075a | 2010-07-06 15:39:01 -0700 | [diff] [blame] | 1655 | |
Yinghai Lu | 3661ca6 | 2010-09-15 13:05:29 -0700 | [diff] [blame] | 1656 | void __init_memblock memblock_set_current_limit(phys_addr_t limit) |
Benjamin Herrenschmidt | e63075a | 2010-07-06 15:39:01 -0700 | [diff] [blame] | 1657 | { |
| 1658 | memblock.current_limit = limit; |
| 1659 | } |
| 1660 | |
Laura Abbott | fec5101 | 2014-02-27 01:23:43 +0100 | [diff] [blame] | 1661 | phys_addr_t __init_memblock memblock_get_current_limit(void) |
| 1662 | { |
| 1663 | return memblock.current_limit; |
| 1664 | } |
| 1665 | |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1666 | static void __init_memblock memblock_dump(struct memblock_type *type, char *name) |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1667 | { |
| 1668 | unsigned long long base, size; |
Tang Chen | 66a2075 | 2014-01-21 15:49:20 -0800 | [diff] [blame] | 1669 | unsigned long flags; |
Alexander Kuleshov | 8c9c170 | 2016-01-14 15:20:42 -0800 | [diff] [blame] | 1670 | int idx; |
| 1671 | struct memblock_region *rgn; |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1672 | |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1673 | pr_info(" %s.cnt = 0x%lx\n", name, type->cnt); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1674 | |
Alexander Kuleshov | 8c9c170 | 2016-01-14 15:20:42 -0800 | [diff] [blame] | 1675 | for_each_memblock_type(type, rgn) { |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1676 | char nid_buf[32] = ""; |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1677 | |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1678 | base = rgn->base; |
| 1679 | size = rgn->size; |
Tang Chen | 66a2075 | 2014-01-21 15:49:20 -0800 | [diff] [blame] | 1680 | flags = rgn->flags; |
Tejun Heo | 7c0caeb | 2011-07-14 11:43:42 +0200 | [diff] [blame] | 1681 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 1682 | if (memblock_get_region_node(rgn) != MAX_NUMNODES) |
| 1683 | snprintf(nid_buf, sizeof(nid_buf), " on node %d", |
| 1684 | memblock_get_region_node(rgn)); |
| 1685 | #endif |
Tang Chen | 66a2075 | 2014-01-21 15:49:20 -0800 | [diff] [blame] | 1686 | pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n", |
Alexander Kuleshov | 8c9c170 | 2016-01-14 15:20:42 -0800 | [diff] [blame] | 1687 | name, idx, base, base + size - 1, size, nid_buf, flags); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1688 | } |
| 1689 | } |
| 1690 | |
Tejun Heo | 4ff7b82 | 2011-12-08 10:22:06 -0800 | [diff] [blame] | 1691 | void __init_memblock __memblock_dump_all(void) |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1692 | { |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1693 | pr_info("MEMBLOCK configuration:\n"); |
Tejun Heo | 1440c4e | 2011-12-08 10:22:08 -0800 | [diff] [blame] | 1694 | pr_info(" memory size = %#llx reserved size = %#llx\n", |
| 1695 | (unsigned long long)memblock.memory.total_size, |
| 1696 | (unsigned long long)memblock.reserved.total_size); |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1697 | |
| 1698 | memblock_dump(&memblock.memory, "memory"); |
| 1699 | memblock_dump(&memblock.reserved, "reserved"); |
| 1700 | } |
| 1701 | |
Tejun Heo | 1aadc05 | 2011-12-08 10:22:08 -0800 | [diff] [blame] | 1702 | void __init memblock_allow_resize(void) |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1703 | { |
Benjamin Herrenschmidt | 142b45a | 2010-07-06 15:39:13 -0700 | [diff] [blame] | 1704 | memblock_can_resize = 1; |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1705 | } |
| 1706 | |
Benjamin Herrenschmidt | 6ed311b | 2010-07-12 14:36:48 +1000 | [diff] [blame] | 1707 | static int __init early_memblock(char *p) |
| 1708 | { |
| 1709 | if (p && strstr(p, "debug")) |
| 1710 | memblock_debug = 1; |
| 1711 | return 0; |
| 1712 | } |
| 1713 | early_param("memblock", early_memblock); |
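/*
 * Usage note: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug and thereby enables the memblock_dbg() tracing used
 * throughout this file.
 */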
| 1714 | |
Tejun Heo | c378ddd | 2011-07-14 11:46:03 +0200 | [diff] [blame] | 1715 | #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK) |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 1716 | |
| 1717 | static int memblock_debug_show(struct seq_file *m, void *private) |
| 1718 | { |
| 1719 | struct memblock_type *type = m->private; |
| 1720 | struct memblock_region *reg; |
| 1721 | int i; |
| 1722 | |
| 1723 | for (i = 0; i < type->cnt; i++) { |
| 1724 | reg = &type->regions[i]; |
| 1725 | seq_printf(m, "%4d: ", i); |
| 1726 | if (sizeof(phys_addr_t) == 4) |
| 1727 | seq_printf(m, "0x%08lx..0x%08lx\n", |
| 1728 | (unsigned long)reg->base, |
| 1729 | (unsigned long)(reg->base + reg->size - 1)); |
| 1730 | else |
| 1731 | seq_printf(m, "0x%016llx..0x%016llx\n", |
| 1732 | (unsigned long long)reg->base, |
| 1733 | (unsigned long long)(reg->base + reg->size - 1)); |
| 1734 | |
| 1735 | } |
| 1736 | return 0; |
| 1737 | } |
| 1738 | |
| 1739 | static int memblock_debug_open(struct inode *inode, struct file *file) |
| 1740 | { |
| 1741 | return single_open(file, memblock_debug_show, inode->i_private); |
| 1742 | } |
| 1743 | |
| 1744 | static const struct file_operations memblock_debug_fops = { |
| 1745 | .open = memblock_debug_open, |
| 1746 | .read = seq_read, |
| 1747 | .llseek = seq_lseek, |
| 1748 | .release = single_release, |
| 1749 | }; |
| 1750 | |
| 1751 | static int __init memblock_init_debugfs(void) |
| 1752 | { |
| 1753 | struct dentry *root = debugfs_create_dir("memblock", NULL); |
| 1754 | if (!root) |
| 1755 | return -ENXIO; |
| 1756 | debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); |
| 1757 | debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); |
Philipp Hachtmann | 70210ed | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1758 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
| 1759 | debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); |
| 1760 | #endif |
Benjamin Herrenschmidt | 6d03b88 | 2010-07-06 15:39:19 -0700 | [diff] [blame] | 1761 | |
| 1762 | return 0; |
| 1763 | } |
| 1764 | __initcall(memblock_init_debugfs); |
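/*
 * Usage note: with CONFIG_DEBUG_FS enabled and debugfs mounted, the tables
 * are readable under /sys/kernel/debug/memblock/, e.g. (output illustrative):
 *
 *   # cat /sys/kernel/debug/memblock/memory
 *      0: 0x0000000000001000..0x000000009fffffff
 */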
| 1765 | |
| 1766 | #endif /* CONFIG_DEBUG_FS */ |