/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
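/*
 * For illustration only: once the virtual memmap is populated, the
 * pfn/page conversions reduce to pointer arithmetic against the
 * vmemmap base. A minimal sketch of what the generic memory model can
 * then do (the macro names follow include/asm-generic/memory_model.h
 * for the vmemmap case; they are reproduced here as an example, not
 * defined or used by this file):
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * No memory access is required; the page tables instantiated below
 * are what make the computed address a valid struct page.
 */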
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, else fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __alloc_bootmem_node(NODE_DATA(node), size, size,
				__pa(MAX_DMA_ADDRESS));
}

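/*
 * Check which node actually backs the pte and warn if it differs from
 * the node the caller asked for, since that range of page structs
 * would then live off-node.
 */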
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (actual_node != node)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

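/*
 * Populate the PTE level: if no backing page has been instantiated for
 * this address yet, allocate one on the requested node and point the
 * kernel PTE at it.
 */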
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

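/* Instantiate the PMD level of the vmemmap page table if necessary. */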
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

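/* Instantiate the PUD level of the vmemmap page table if necessary. */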
pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

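/* Instantiate the PGD level of the vmemmap page table if necessary. */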
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

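/*
 * Generic helper for architectures that back the vmemmap with base
 * pages: walk the range one PAGE_SIZE step at a time and instantiate
 * every page table level down to the PTE, verifying each mapping as
 * it is created.
 */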
int __meminit vmemmap_populate_basepages(struct page *start_page,
					unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

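/*
 * Build the memmap for one sparsemem section by asking the
 * architecture's vmemmap_populate() to map PAGES_PER_SECTION pages
 * starting at the section's first page.
 */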
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
	if (error)
		return NULL;

	return map;
}