/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
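/*
 * For illustration: with a virtual memory map the pfn <-> struct page
 * conversions become pure pointer arithmetic against a fixed virtual
 * base.  A sketch, assuming an arch-defined VMEMMAP_START (this is how
 * the generic SPARSEMEM_VMEMMAP memory model is wired up):
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */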
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __init_refok __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		else
			page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* All early-stage callers must request blocks of the same size */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the next block from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}
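
/*
 * In this file the pte level is the only buffered caller and always
 * requests PAGE_SIZE blocks, so the ALIGN() above degenerates into a
 * simple bump-pointer walk through the buffer; arch code mapping the
 * vmemmap with huge pages may instead request PMD_SIZE blocks
 * throughout, which is why the size must stay uniform.
 */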

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(struct page *start_page,
					unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
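
/*
 * An arch that maps the vmemmap with base pages can implement its
 * vmemmap_populate() hook as little more than a call to the helper
 * above.  A minimal sketch (arches that use huge-page mappings, such
 * as x86-64, instead walk and map the range at the pmd level):
 *
 *	int __meminit vmemmap_populate(struct page *start_page,
 *				       unsigned long size, int node)
 *	{
 *		return vmemmap_populate_basepages(start_page, size, node);
 *	}
 */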

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
	if (error)
		return NULL;

	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* free the unused tail of the buffer */
		free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}