/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

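/* Mappings between CPUs, local APIC IDs, and NUMA nodes. */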
unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
        int i;
        int res = -1;
        unsigned long addr, end;

        memset(memnodemap, 0xff, memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != 0xff)
                                return -1;
                        memnodemap[addr >> shift] = i;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

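/*
 * Allocate the memnodemap: small maps fit in the statically embedded
 * array, larger ones get a cache-line aligned area reserved from e820.
 */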
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long pad, pad_addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= 48)
                return 0;

        pad = L1_CACHE_BYTES - 1;
        pad_addr = 0x8000;
        nodemap_size = pad + memnodemapsize;
        nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
                                      nodemap_size);
        if (nodemap_addr == -1UL) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        pad_addr = (nodemap_addr + pad) & ~pad;
        memnodemap = phys_to_virt(pad_addr);

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
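/*
 * For example, two nodes covering [0, 1GB) and [1GB, 2GB) have all
 * boundaries aligned to 2^30, so the shift is 30 and memnodemap needs
 * (2GB >> 30) + 1 = 3 entries.
 */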
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i) + 1;
        return i;
}

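/*
 * Derive the memnode hash shift from the node layout, allocate the
 * memnodemap, and populate it.  Returns the shift, or -1 on failure.
 */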
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
               shift);

        if (populate_memnodemap(nodes, numnodes, shift) != 1) {
                printk(KERN_INFO
                       "Your memory is not aligned; you need to rebuild your "
                       "kernel with a bigger NODEMAPSIZE, shift=%d\n",
                       shift);
                return -1;
        }
        return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif

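/*
 * Allocate memory for early per-node data: prefer a free e820 area
 * inside [start, end), fall back to the generic bootmem allocator.
 */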
static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
               unsigned long size)
{
        unsigned long mem = find_e820_area(start, end, size);
        void *ptr;
        if (mem != -1L)
                return __va(mem);
        ptr = __alloc_bootmem_nopanic(size,
                                      SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
        if (!ptr) {
                printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);
                return NULL;
        }
        return ptr;
}

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
        unsigned long nodedata_phys;
        void *bootmap;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

        start = round_up(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

        /* Find a place for the bootmem map */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT);
        if (bootmap == NULL) {
                if (nodedata_phys < start || nodedata_phys >= end)
                        free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);
        Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, end_pfn);

        free_bootmem_with_active_regions(nodeid, end);

        reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
        reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
#endif
        node_set_online(nodeid);
}

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
        unsigned long start_pfn, end_pfn, memmapsize, limit;

        start_pfn = node_start_pfn(nodeid);
        end_pfn = node_end_pfn(nodeid);

        Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
                nodeid, start_pfn, end_pfn);

        /*
         * Try to allocate mem_map at the end to not fill up precious
         * <4GB memory.
         */
        memmapsize = sizeof(struct page) * (end_pfn - start_pfn);
        limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        NODE_DATA(nodeid)->node_mem_map =
                __alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
                                     memmapsize, SMP_CACHE_BYTES,
                                     round_down(limit - memmapsize, PAGE_SIZE),
                                     limit);
#endif
}

void __init numa_init_array(void)
{
        int rr, i;
        /*
         * There are unfortunately some poorly designed mainboards around
         * that only connect memory to a single CPU.  This breaks the
         * 1:1 cpu->node mapping.  To avoid this, fill in the mapping for
         * all possible CPUs, as the number of CPUs is not known yet.
         * We round-robin the existing nodes.
         */
        rr = first_node(node_online_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node[i] != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

#ifdef CONFIG_NUMA_EMU
/* NUMA emulation */
char *cmdline __initdata;

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is 0
 * if there is additional memory left for allocation past addr and -1 otherwise.
 * addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
                                   u64 size, u64 max_addr)
{
        int ret = 0;
        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start,
                                      int num_nodes)
{
        unsigned int big;
        u64 size;
        int i;

        if (num_nodes <= 0)
                return -1;
        if (num_nodes > MAX_NUMNODES)
                num_nodes = MAX_NUMNODES;
        size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
               num_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the leftovers.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
              FAKE_NODE_MIN_SIZE;

        /* Round down to nearest FAKE_NODE_MIN_SIZE. */
        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                printk(KERN_ERR "Not enough memory for each node. "
                       "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = node_start; i < num_nodes + node_start; i++) {
                u64 end = *addr + size;
                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
                 * The final node can have the remaining system RAM.  Other
                 * nodes receive roughly the same amount of available pages.
                 */
                if (i == num_nodes + node_start - 1)
                        end = max_addr;
                else
                        while (end - *addr - e820_hole_size(*addr, end) <
                               size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > max_addr) {
                                        end = max_addr;
                                        break;
                                }
                        }
                if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
                        break;
        }
        return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start, u64 size)
{
        int i = node_start;
        size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
        while (!setup_node_range(i++, nodes, addr, size, max_addr))
                ;
        return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
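/*
 * For example, "numa=fake=4" splits the RAM into four equal fake nodes,
 * while "numa=fake=2*512,1024" creates two 512MB nodes followed by one
 * 1024MB node; any remaining RAM is handled according to the trailing
 * character (see the parsing loop below).
 */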
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
        struct bootnode nodes[MAX_NUMNODES];
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = end_pfn << PAGE_SHIFT;
        int num_nodes = 0;
        int coeff_flag;
        int coeff = -1;
        int num = 0;
        u64 size;
        int i;

        memset(&nodes, 0, sizeof(nodes));
        /*
         * If the numa=fake command-line is just a single number N, split the
         * system RAM into N fake nodes.
         */
        if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
                num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
                                                simple_strtol(cmdline, NULL, 0));
                if (num_nodes < 0)
                        return num_nodes;
                goto out;
        }

        /* Parse the command line. */
        for (coeff_flag = 0; ; cmdline++) {
                if (*cmdline && isdigit(*cmdline)) {
                        num = num * 10 + *cmdline - '0';
                        continue;
                }
                if (*cmdline == '*') {
                        if (num > 0)
                                coeff = num;
                        coeff_flag = 1;
                }
                if (!*cmdline || *cmdline == ',') {
                        if (!coeff_flag)
                                coeff = 1;
                        /*
                         * Round down to the nearest FAKE_NODE_MIN_SIZE.
                         * Command-line sizes are in megabytes.
                         */
                        size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
                        if (size)
                                for (i = 0; i < coeff; i++, num_nodes++)
                                        if (setup_node_range(num_nodes, nodes,
                                            &addr, size, max_addr) < 0)
                                                goto done;
                        if (!*cmdline)
                                break;
                        coeff_flag = 0;
                        coeff = -1;
                }
                num = 0;
        }
done:
        if (!num_nodes)
                return -1;
        /* Fill remainder of system RAM, if appropriate. */
        if (addr < max_addr) {
                if (coeff_flag && coeff < 0) {
                        /* Split remaining nodes into num-sized chunks */
                        num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
                                                         num_nodes, num);
                        goto out;
                }
                switch (*(cmdline - 1)) {
                case '*':
                        /* Split remaining nodes into coeff chunks */
                        if (coeff <= 0)
                                break;
                        num_nodes += split_nodes_equally(nodes, &addr, max_addr,
                                                         num_nodes, coeff);
                        break;
                case ',':
                        /* Do not allocate remaining system RAM */
                        break;
                default:
                        /* Give one final node */
                        setup_node_range(num_nodes, nodes, &addr,
                                         max_addr - addr, max_addr);
                        num_nodes++;
                }
        }
out:
        memnode_shift = compute_hash_shift(nodes, num_nodes);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered by
         * SRAT.
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map) {
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                             nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

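/*
 * Set up the NUMA memory layout: try NUMA emulation, then ACPI SRAT, then
 * K8 node scanning, and finally fall back to a single node covering all
 * memory.
 */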
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;

        nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, end_pfn))
                return;
        nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          end_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               end_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        nodes_clear(node_online_map);
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < NR_CPUS; i++)
                numa_set_node(i, 0);
        node_to_cpumask[0] = cpumask_of_cpu(0);
        e820_register_active_regions(0, start_pfn, end_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

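/* Record the CPU in the cpumask of its node. */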
__cpuinit void numa_add_cpu(int cpu)
{
        set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

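/* Bind a CPU to a node in both the per-CPU PDA and cpu_to_node[]. */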
void __cpuinit numa_set_node(int cpu, int node)
{
        cpu_pda(cpu)->nodenumber = node;
        cpu_to_node[cpu] = node;
}

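/* Release bootmem on all online nodes; returns the number of pages freed. */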
unsigned long __init numa_free_all_bootmem(void)
{
        int i;
        unsigned long pages = 0;
        for_each_online_node(i) {
                pages += free_all_bootmem_node(NODE_DATA(i));
        }
        return pages;
}

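/* Set up the zone size limits and per-node memmaps for all online nodes. */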
void __init paging_init(void)
{
        int i;
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = end_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        for_each_online_node(i) {
                setup_node_zones(i);
        }

        free_area_init_nodes(max_zone_pfns);
}

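/*
 * Parse the early "numa=" boot options: "off", "fake=<layout>" (with NUMA
 * emulation), and "noacpi" / "hotadd=<percent>" (with ACPI NUMA).
 */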
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
        if (!strncmp(opt, "hotadd=", 7))
                hotadd_percent = simple_strtoul(opt + 7, NULL, 10);
#endif
        return 0;
}

early_param("numa", numa_setup);

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK because cpu_to_node[]
 * has already been initialized in a round-robin manner by
 * numa_init_array(), prior to this call, and that initialization
 * is good enough for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
        int i;
        for (i = 0; i < NR_CPUS; i++) {
                u8 apicid = x86_cpu_to_apicid[i];
                if (apicid == BAD_APICID)
                        continue;
                if (apicid_to_node[apicid] == NUMA_NO_NODE)
                        continue;
                numa_set_node(i, apicid_to_node[apicid]);
        }
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per-node page addresses.
 * These are out of line because they are quite big.
 * They could all be tuned by pre-caching more state.
 * Should do that.
 */

int pfn_valid(unsigned long pfn)
{
        unsigned nid;
        if (pfn >= num_physpages)
                return 0;
        nid = pfn_to_nid(pfn);
        if (nid == 0xff)
                return 0;
        return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif