// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
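/*
 * Illustrative sketch (simplified, not used by this file): with
 * SPARSEMEM_VMEMMAP the pfn <-> struct page conversions reduce to
 * pointer arithmetic against the architecture's vmemmap base, roughly
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * (see include/asm-generic/memory_model.h for the real definitions).
 */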
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
}

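/*
 * Optional scratch buffer set up by sparse_mem_maps_populate_node(): one
 * large early (memblock) allocation that vmemmap_alloc_block_buf() carves
 * smaller pieces out of, instead of issuing a separate bootmem allocation
 * for every page of the memmap.
 */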
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* All callers must request the same size during the early stage so the
 * buffer can be carved up consistently. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

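/*
 * A struct vmem_altmap describes a device-supplied range of pfns (e.g.
 * from a persistent memory namespace) that backs the struct page array
 * instead of regular memory: base_pfn is the start of the reservation,
 * reserve is the part the device keeps for itself, free is what may be
 * handed out, and alloc/align track how much the helpers below have
 * consumed.
 */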
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap: device page map
 * @size: size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
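
/*
 * Worked example for altmap_alloc_block_buf(): with 4K pages, a 2MB
 * request is nr_pfns = 512; its lowest set bit gives nr_align = 512, so
 * the returned range is aligned to 2MB and can be mapped with a
 * PMD-sized vmemmap entry.
 */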

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

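/*
 * The vmemmap_*_populate() helpers below instantiate one page-table level
 * each for a vmemmap address, allocating a backing page on demand.  They
 * are used by vmemmap_populate_basepages() and by architectures that build
 * (parts of) their vmemmap from base pages.
 */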
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

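/*
 * Populate the vmemmap range [start, end) with base pages: for every
 * PAGE_SIZE step, walk (and allocate, where missing) each page-table
 * level and map a freshly allocated page.  Architectures without
 * huge-page vmemmap support typically use this from their
 * vmemmap_populate() implementation.
 */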
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

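/*
 * Build the struct page array for one sparsemem section: work out the
 * virtual range it occupies inside the vmemmap and ask the architecture
 * to back that range, optionally drawing the pages from @altmap.
 */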
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}

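/*
 * Populate the memmaps of all present sections on one node.  A single
 * large early allocation serves as a scratch buffer so the per-section
 * populate calls can carve their pages out of it; whatever remains is
 * handed back to memblock at the end.
 */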
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free whatever is left of the buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}