/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <linux/swap.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <asm/bios_ebda.h>
#include <asm/proto.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisations:
 *
 * 1) node_online_map - the map of all nodes configured (online) in the system
 * 2) node_start_pfn - the starting page frame number for a node
 * 3) node_end_pfn - the ending page frame number for a node
 */
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;


#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map - the mapping between a pfn and owning node
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at 64MB granularity (each element of the array represents
 * 64MB of memory and is marked with the owning node id), so if the
 * first gig is on node 0 and the second gig is on node 1, physnode_map
 * will contain:
 *
 *     physnode_map[0-15] = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);

void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
	       nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
		printk(KERN_CONT "%lx ", pfn);
	}
	printk(KERN_CONT "\n");
}

unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long nr_pages = end_pfn - start_pfn;

	if (!nr_pages)
		return 0;

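	/*
	 * One extra struct page of slack; presumably headroom for memmap
	 * alignment (an assumption - the original code does not say why).
	 */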
	return (nr_pages + 1) * sizeof(struct page);
}
#endif

extern unsigned long find_max_low_pfn(void);
extern unsigned long highend_pfn, highstart_pfn;

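/* bytes mapped by one PMD-sized large page: 4MB without PAE, 2MB with PAE */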
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_size[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

static unsigned long kva_start_pfn;
static unsigned long kva_pages;
/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
	printk(KERN_DEBUG "NUMA - single node, flat memory mode\n");

	node_start_pfn[0] = 0;
	node_end_pfn[0] = max_pfn;
	memblock_x86_register_active_regions(0, 0, max_pfn);
	memory_present(0, 0, max_pfn);
	node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);

	/* Indicate there is one node available. */
	nodes_clear(node_online_map);
	node_set_online(0);
	return 1;
}

/*
 * Find the highest page frame number we have available for the node
 */
static void __init propagate_e820_map_node(int nid)
{
	if (node_end_pfn[nid] > max_pfn)
		node_end_pfn[nid] = max_pfn;
	/*
	 * if a user has given mem=XXXX, then we need to make sure
	 * that the node _starts_ before that, too, not just ends
	 */
	if (node_start_pfn[nid] > max_pfn)
		node_start_pfn[nid] = max_pfn;
	BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
	char buf[16];

	if (node_has_online_mem(nid) && node_remap_start_vaddr[nid])
		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
	else {
		unsigned long pgdat_phys;
		pgdat_phys = memblock_find_in_range(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT,
				 sizeof(pg_data_t),
				 PAGE_SIZE);
		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
		memset(buf, 0, sizeof(buf));
		sprintf(buf, "NODE_DATA %d", nid);
		memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf);
	}
	printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
		nid, (unsigned long)NODE_DATA(nid));
}

/*
 * In the DISCONTIGMEM and SPARSEMEM memory model, a portion of the kernel
 * virtual address space (KVA) is reserved and portions of nodes are mapped
 * using it.  This is to allow node-local memory to be allocated for
 * structures that would normally require ZONE_NORMAL.  The memory is
 * allocated with alloc_remap() and callers should be prepared to allocate
 * from the bootmem allocator instead.
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];

void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
		return NULL;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}

static void __init remap_numa_kva(void)
{
	void *vaddr;
	unsigned long pfn;
	int node;

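	/*
	 * node_remap_size[] is in 4K pages; each step of PTRS_PER_PTE pages
	 * below is mapped with a single PMD-sized large page.
	 */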
	for_each_online_node(node) {
		printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
		for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
			vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
			printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
				(unsigned long)vaddr,
				node_remap_start_pfn[node] + pfn);
			set_pmd_pfn((ulong) vaddr,
				node_remap_start_pfn[node] + pfn,
				PAGE_KERNEL_LARGE);
		}
	}
}

#ifdef CONFIG_HIBERNATION
/**
 * resume_map_numa_kva - add KVA mapping to the temporary page tables created
 *                       during resume from hibernation
 * @pgd_base: temporary resume page directory
 */
void resume_map_numa_kva(pgd_t *pgd_base)
{
	int node;

	for_each_online_node(node) {
		unsigned long start_va, start_pfn, size, pfn;

		start_va = (unsigned long)node_remap_start_vaddr[node];
		start_pfn = node_remap_start_pfn[node];
		size = node_remap_size[node];

		printk(KERN_DEBUG "%s: node %d\n", __func__, node);

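		/* recreate each PMD-sized large-page mapping in the temporary tables */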
		for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
			pgd_t *pgd = pgd_base + pgd_index(vaddr);
			pud_t *pud = pud_offset(pgd, vaddr);
			pmd_t *pmd = pmd_offset(pud, vaddr);

			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
						PAGE_KERNEL_LARGE_EXEC));

			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
				__func__, vaddr, start_pfn + pfn);
		}
	}
}
#endif

static __init unsigned long calculate_numa_remap_pages(void)
{
	int nid;
	unsigned long size, reserve_pages = 0;

	for_each_online_node(nid) {
		u64 node_kva_target;
		u64 node_kva_final;

		/*
		 * The acpi/srat node info can show hot-add memory zones
		 * where memory could be added but not currently present.
		 */
		printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
			nid, node_start_pfn[nid], node_end_pfn[nid]);
		if (node_start_pfn[nid] > max_pfn)
			continue;
		if (!node_end_pfn[nid])
			continue;
		if (node_end_pfn[nid] > max_pfn)
			node_end_pfn[nid] = max_pfn;

		/* ensure the remap includes space for the pgdat. */
		size = node_remap_size[nid] + sizeof(pg_data_t);

		/* convert size to large (pmd size) pages, rounding up */
		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		/* now the roundup is correct, convert to PAGE_SIZE pages */
		size = size * PTRS_PER_PTE;

		node_kva_target = round_down(node_end_pfn[nid] - size,
						 PTRS_PER_PTE);
		node_kva_target <<= PAGE_SHIFT;
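		/*
		 * Walk downwards one large page at a time until memblock
		 * finds room for the remap area above node_start_pfn.
		 */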
		do {
			node_kva_final = memblock_find_in_range(node_kva_target,
					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
					((u64)size)<<PAGE_SHIFT,
					LARGE_PAGE_BYTES);
			node_kva_target -= LARGE_PAGE_BYTES;
		} while (node_kva_final == MEMBLOCK_ERROR &&
			 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));

		if (node_kva_final == MEMBLOCK_ERROR)
			panic("Can not get kva ram\n");

		node_remap_size[nid] = size;
		node_remap_offset[nid] = reserve_pages;
		reserve_pages += size;
		printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
			" node %d at %llx\n",
			size, nid, node_kva_final>>PAGE_SHIFT);

		/*
		 * Prevent the KVA RAM from being handed out below max_low_pfn;
		 * we want it available on systems with less memory later.
		 * The layout will be: KVA address, then KVA RAM.
		 *
		 * We are supposed to record only ranges below max_low_pfn,
		 * but there can be holes in high memory, and the free-page
		 * check is only page_is_ram(pfn) && !page_is_reserved_early(pfn).
		 * So reserve the range with memblock_x86_reserve_range here and
		 * hope we don't run out of slots in that array.
		 */
		memblock_x86_reserve_range(node_kva_final,
			      node_kva_final+(((u64)size)<<PAGE_SHIFT),
			      "KVA RAM");

		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
	}
	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
			reserve_pages);
	return reserve_pages;
}

static void init_remap_allocator(int nid)
{
	node_remap_start_vaddr[nid] = pfn_to_kaddr(
			kva_start_pfn + node_remap_offset[nid]);
	node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
		(node_remap_size[nid] * PAGE_SIZE);
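	/* the pgdat sits at the start of the remap area; allocate past it */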
	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
		ALIGN(sizeof(pg_data_t), PAGE_SIZE);

	printk(KERN_DEBUG "node %d will remap to vaddr %08lx - %08lx\n", nid,
		(ulong) node_remap_start_vaddr[nid],
		(ulong) node_remap_end_vaddr[nid]);
}

void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
				int acpi, int k8)
{
	int nid;
	long kva_target_pfn;

	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
	 * from node local memory.  They are then mapped directly into KVA
	 * between zone normal and vmalloc space.  Calculate the size of
	 * this space and use it to adjust the boundary between ZONE_NORMAL
	 * and ZONE_HIGHMEM.
	 */

	get_memcfg_numa();

	kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);

	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
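	/* search downwards one large page at a time until memblock finds space */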
	do {
		kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
					max_low_pfn<<PAGE_SHIFT,
					kva_pages<<PAGE_SHIFT,
					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
		kva_target_pfn -= PTRS_PER_PTE;
	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);

	if (kva_start_pfn == MEMBLOCK_ERROR)
		panic("Can not get kva space\n");

	printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n",
		kva_start_pfn, max_low_pfn);
	printk(KERN_INFO "max_pfn = %lx\n", max_pfn);

	/* avoid clash with initrd */
	memblock_x86_reserve_range(kva_start_pfn<<PAGE_SHIFT,
		      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
		      "KVA PG");
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));
	printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
			max_low_pfn, highstart_pfn);

	printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(max_low_pfn));
	for_each_online_node(nid) {
		init_remap_allocator(nid);

		allocate_pgdat(nid);
	}
	remap_numa_kva();

	printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));
	for_each_online_node(nid)
		propagate_e820_map_node(nid);

	for_each_online_node(nid) {
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
		NODE_DATA(nid)->node_id = nid;
	}

	setup_bootmem_allocator();
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int paddr_to_nid(u64 addr)
{
	int nid;
	unsigned long pfn = PFN_DOWN(addr);

	for_each_node(nid)
		if (node_start_pfn[nid] <= pfn &&
		    pfn < node_end_pfn[nid])
			return nid;

	return -1;
}

/*
 * This function is used to look up a node id BEFORE memmap and mem_section
 * are initialized (pfn_to_nid() can't be used yet).
 * If _PXM is not defined in the ACPI DSDT, the node id must be found this way.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	int nid = paddr_to_nid(addr);
	return (nid >= 0) ? nid : 0;
}

EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif