/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page(), page_to_pfn(),
 * virt_to_page() and page_address() to be implemented as a base offset
 * calculation without touching memory.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
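/*
 * For illustration: with CONFIG_SPARSEMEM_VMEMMAP, the generic model in
 * include/asm-generic/memory_model.h boils the conversions down to plain
 * pointer arithmetic against the fixed vmemmap base (a sketch of that
 * model, not a definition made by this file):
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */
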
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}

/* Scratch buffer, carved out of bootmem, for early vmemmap allocations. */
static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* All early-boot callers must use the same size so the buffer stays packed. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

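/*
 * Design note on the buffer path above: grabbing one large bootmem block
 * up front (see sparse_mem_maps_populate_node() below) and handing out
 * aligned chunks of it avoids many separate bootmem allocations and keeps
 * a node's memmap physically contiguous. A sketch with hypothetical
 * addresses, for a PAGE_SIZE (0x1000) request:
 *
 *	ptr = ALIGN(0x11000, 0x1000) = 0x11000;	returned chunk
 *	vmemmap_buf = 0x12000;			next request starts here
 */
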
/*
 * Warn if the page backing this pte ended up on a distant node: every
 * later access to the struct pages in this range would then be remote.
 */
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

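/*
 * A minimal sketch of the hook an architecture is expected to provide;
 * arches that want no special (e.g. huge page) vmemmap mappings can do
 * exactly this:
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */
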
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

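/*
 * Worked example, assuming a typical 64-bit configuration (the values are
 * illustrative, not guaranteed): with PAGES_PER_SECTION = 32768 and a
 * 64-byte struct page, each section's map spans 32768 * 64 = 2 MiB of
 * virtual address space, i.e. exactly one PMD on x86-64 -- which is why
 * the preallocated buffer below is PMD_SIZE aligned.
 */
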
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused tail of the preallocated buffer */
		free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
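
/*
 * Usage note: the expected caller is sparse_init()'s batch-allocation path
 * in mm/sparse.c (an assumption about the caller, based on the
 * CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER code there), which groups all
 * present sections of one node so that their memmaps can share the single
 * contiguous buffer set up above.
 */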
233}