// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow the VM primitives pfn_to_page(), page_to_pfn(),
 * virt_to_page() and page_address() to be implemented as a base offset
 * calculation without a memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those architectures the virtual memory map comes
 * essentially for free if we use the same page size as the 1-1 mappings.
 * In that case the only overhead consists of a few additional pages
 * that are allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
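
/*
 * For illustration only (a sketch of the generic SPARSEMEM_VMEMMAP memory
 * model, not code from this file): once the virtual memory map is populated,
 * the pfn/page conversions reduce to offset arithmetic against the fixed
 * vmemmap base, roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */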
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* Callers need to make sure the allocation size stays the same throughout the early boot stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr = sparse_buffer_alloc(size);

	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

/* Next unused pfn in the device-provided struct page reservation. */
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

/* Number of pfns still available in the device-provided reservation. */
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap:	device page map
 * @size:	size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

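/*
 * Worked example with made-up numbers (an illustration, not code from this
 * file): a 2MB request has nr_pfns = 512, so nr_align starts out as 512
 * (the lowest set bit of nr_pfns). If the next free pfn in the altmap is
 * 0x10100, then ALIGN(0x10100, 512) = 0x10200, so 256 pfns are skipped as
 * alignment padding, 512 pfns are consumed for the allocation, and the
 * kernel virtual address of pfn 0x10200 is returned.
 */
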
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

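/*
 * Minimal sketch of an architecture's vmemmap_populate() hook when base
 * pages are sufficient (an illustration of how the helper above is
 * typically wired up, not code from this file; the altmap argument is
 * simply ignored in that case):
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */
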
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}