/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

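/* memnode holds the hash shift plus the physical-address-to-node map */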
struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_off __initdata;
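/* location and size of the dynamically allocated memnodemap[] */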
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns :
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift, int *nodeids)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;

			if (!nodeids)
				memnodemap[addr >> shift] = i;
			else
				memnodemap[addr >> shift] = nodeids[i];

			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

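/*
 * Use the map embedded in struct memnode when memnodemap[] fits in it,
 * otherwise allocate a cache-aligned replacement from memblock.
 */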
static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = memblock_find_in_range(addr, max_pfn<<PAGE_SHIFT,
					      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == MEMBLOCK_ERROR) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

/*
 * The least significant bit set across all start and end addresses in
 * the node map determines the maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
					 int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i)+1;
	return i;
}

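/*
 * Pick the smallest shift for which every node's range maps to its own
 * set of memnodemap[] entries, then allocate and populate the map so
 * that phys_to_nid() reduces to memnodemap[addr >> memnode_shift].
 * Returns the shift on success and -1 on failure.
 */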
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
			      int *nodeids)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
		shift);

	if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "(shift=%d)\n", shift);
		return -1;
	}
	return shift;
}

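/* Resolve a pfn to its node via the memnodemap hash. */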
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}

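/*
 * Find 'size' bytes of early memory for node 'nodeid', preferring
 * memory on the node itself and falling back to any mapped memory
 * above the DMA limit. Returns a virtual address or NULL on failure.
 */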
static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem;

	/*
	 * Place the allocation as high as possible; other early
	 * allocations are expected to end up alongside NODE_DATA.
	 */
	if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
		start = MAX_DMA_PFN<<PAGE_SHIFT;
	if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
	    end > (MAX_DMA32_PFN<<PAGE_SHIFT))
		start = MAX_DMA32_PFN<<PAGE_SHIFT;
	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	/* extend the search scope */
	end = max_pfn_mapped << PAGE_SHIFT;
	start = MAX_DMA_PFN << PAGE_SHIFT;
	mem = memblock_find_in_range(start, end, size, align);
	if (mem != MEMBLOCK_ERROR)
		return __va(mem);

	printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
	       size, nodeid);

	return NULL;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, last_pfn, nodedata_phys;
	const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	int nid;

	if (!end)
		return;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	last_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
	printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
		nodedata_phys + pgdat_size - 1);
	nid = phys_to_nid(nodedata_phys);
	if (nid != nodeid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->node_id = nodeid;
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

	node_set_online(nodeid);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __initdata;
static char *cmdline __initdata;

static int __init setup_physnodes(unsigned long start, unsigned long end,
				  int acpi, int amd)
{
	int nr_nodes = 0;
	int ret = 0;
	int i;

#ifdef CONFIG_ACPI_NUMA
	if (acpi)
		nr_nodes = acpi_get_nodes(physnodes);
#endif
#ifdef CONFIG_AMD_NUMA
	if (amd)
		nr_nodes = amd_get_nodes(physnodes);
#endif
	/*
	 * Basic sanity checking on the physical node map: there may be errors
	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
	 * kernel parameter is used.
	 */
	for (i = 0; i < nr_nodes; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		if (physnodes[i].start > end) {
			physnodes[i].end = physnodes[i].start;
			continue;
		}
		if (physnodes[i].end < start) {
			physnodes[i].start = physnodes[i].end;
			continue;
		}
		if (physnodes[i].start < start)
			physnodes[i].start = start;
		if (physnodes[i].end > end)
			physnodes[i].end = end;
	}

	/*
	 * Remove all nodes that have no memory or were truncated because of the
	 * limited address range.
	 */
	for (i = 0; i < nr_nodes; i++) {
		if (physnodes[i].start == physnodes[i].end)
			continue;
		physnodes[ret].start = physnodes[i].start;
		physnodes[ret].end = physnodes[i].end;
		ret++;
	}

	/*
	 * If no physical topology was detected, a single node is faked to cover
	 * the entire address space.
	 */
	if (!ret) {
		physnodes[ret].start = start;
		physnodes[ret].end = end;
		ret = 1;
	}
	return ret;
}

/*
 * Sets up nid to range from addr to addr + size. If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise. addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
	int ret = 0;
	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
 * to max_addr. The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr,
					 int nr_phys_nodes, int nr_nodes)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 size;
	int big;
	int ret = 0;
	int i;

	if (nr_nodes <= 0)
		return -1;
	if (nr_nodes > MAX_NUMNODES) {
		pr_info("numa=fake=%d too large, reducing to %d\n",
			nr_nodes, MAX_NUMNODES);
		nr_nodes = MAX_NUMNODES;
	}

	size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the remainder.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
		FAKE_NODE_MIN_SIZE;

	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		pr_err("Not enough memory for each node.  "
			"NUMA emulation disabled.\n");
		return -1;
	}

	for (i = 0; i < nr_phys_nodes; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);

	/*
	 * Continue to fill physical nodes with fake nodes until there is no
	 * memory left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 end = physnodes[i].start + size;
			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

			if (ret < big)
				end += FAKE_NODE_MIN_SIZE;

			/*
			 * Continue to add memory to this fake node if its
			 * non-reserved memory is less than the per-node size.
			 */
			while (end - physnodes[i].start -
				memblock_x86_hole_size(physnodes[i].start, end) < size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > physnodes[i].end) {
					end = physnodes[i].end;
					break;
				}
			}

			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Avoid allocating more nodes than requested, which can
			 * happen as a result of rounding down each node's size
			 * to FAKE_NODE_MIN_SIZE.
			 */
			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
				end = physnodes[i].end;

			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
	u64 end = start + size;

	while (end - start - memblock_x86_hole_size(start, end) < size) {
		end += FAKE_NODE_MIN_SIZE;
		if (end > max_addr) {
			end = max_addr;
			break;
		}
	}
	return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'. The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
	nodemask_t physnode_mask = NODE_MASK_NONE;
	u64 min_size;
	int ret = 0;
	int i;

	if (!size)
		return -1;
	/*
	 * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
	 * increased accordingly if the requested size is too small. This
	 * creates a uniform distribution of node sizes across the entire
	 * machine (but not necessarily over physical nodes).
	 */
	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
		MAX_NUMNODES;
	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
			FAKE_NODE_MIN_HASH_MASK;
	if (size < min_size) {
		pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
			size >> 20, min_size >> 20);
		size = min_size;
	}
	size &= FAKE_NODE_MIN_HASH_MASK;

	for (i = 0; i < MAX_NUMNODES; i++)
		if (physnodes[i].start != physnodes[i].end)
			node_set(i, physnode_mask);
	/*
	 * Fill physical nodes with fake nodes of size until there is no memory
	 * left on any of them.
	 */
	while (nodes_weight(physnode_mask)) {
		for_each_node_mask(i, physnode_mask) {
			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
			u64 end;

			end = find_end_of_node(physnodes[i].start,
						physnodes[i].end, size);
			/*
			 * If there won't be at least FAKE_NODE_MIN_SIZE of
			 * non-reserved memory in ZONE_DMA32 for the next node,
			 * this one must extend to the boundary.
			 */
			if (end < dma32_end && dma32_end - end -
			    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
				end = dma32_end;

			/*
			 * If there won't be enough non-reserved memory for the
			 * next node, this one must extend to the end of the
			 * physical node.
			 */
			if (physnodes[i].end - end -
			    memblock_x86_hole_size(end, physnodes[i].end) < size)
				end = physnodes[i].end;

			/*
			 * Setup the fake node that will be allocated as bootmem
			 * later. If setup_node_range() returns non-zero, there
			 * is no more memory available on this physical node.
			 */
			if (setup_node_range(ret++, &physnodes[i].start,
					     end - physnodes[i].start,
					     physnodes[i].end) < 0)
				node_clear(i, physnode_mask);
		}
	}
	return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
				 unsigned long last_pfn, int acpi, int amd)
{
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = last_pfn << PAGE_SHIFT;
	int num_phys_nodes;
	int num_nodes;
	int i;

	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
	/*
	 * If the numa=fake command-line contains a 'M' or 'G', it represents
	 * the fixed node size. Otherwise, if it is just a single number N,
	 * split the system RAM into N fake nodes.
	 */
	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
		u64 size;

		size = memparse(cmdline, &cmdline);
		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
	} else {
		unsigned long n;

		n = simple_strtoul(cmdline, NULL, 0);
		num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
	}

	if (num_nodes < 0)
		return num_nodes;
	memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered for
	 * the e820 memory map.
	 */
	remove_all_active_ranges();
	for_each_node_mask(i, node_possible_map) {
		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	acpi_fake_nodes(nodes, num_nodes);
	numa_init_array();
	return 0;
}
#endif /* CONFIG_NUMA_EMU */

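/*
 * Try each NUMA layout in order of preference: emulated nodes
 * (numa=fake), the ACPI SRAT, then AMD northbridge information. If all
 * of them fail, fall back to a single dummy node spanning all memory.
 */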
void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
			 int acpi, int amd)
{
	int i;

	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && acpi && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
						  last_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_AMD_NUMA
	if (!numa_off && amd && !amd_scan_nodes())
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       last_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < nr_cpu_ids; i++)
		numa_set_node(i, 0);
	memblock_x86_register_active_regions(0, start_pfn, last_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

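/*
 * Release all bootmem to the page allocator; returns the number of
 * pages made available.
 */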
unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	pages += free_all_memory_core_early(MAX_NUMNODES);

	return pages;
}

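/*
 * Parse the "numa=" early parameter: "off" disables NUMA, "fake=..."
 * enables NUMA emulation and "noacpi" disables ACPI SRAT parsing.
 */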
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);

#ifdef CONFIG_NUMA

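/* Return the online node with the smallest node_distance() to 'node'. */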
static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[]
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for the NUMA
 * emulation and fake-node cases (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner at numa_init_array(),
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node;
		u16 apicid = cpu_to_apicid[cpu];

		if (apicid == BAD_APICID)
			continue;
		node = apicid_to_node[apicid];
		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}
#endif

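/*
 * Record the cpu->node mapping: in the early map while the per-cpu
 * areas are not yet set up, and in the per-cpu variable afterwards.
 */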
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	struct cpumask *mask;
	char buf[64];

	mask = node_to_cpumask_map[node];
	if (mask == NULL) {
		printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */