/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

nodemask_t cpu_nodes_parsed __initdata;
nodemask_t mem_nodes_parsed __initdata;

struct memnode memnode;

static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;

                        if (!nodeids)
                                memnodemap[addr >> shift] = i;
                        else
                                memnodemap[addr >> shift] = nodeids[i];

                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

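/*
 * Allocate the physical-address-to-node lookup table.  The small
 * embedded_map inside struct memnode is used when it is large enough;
 * otherwise a cache-aligned table is carved out of memblock.
 */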
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = memblock_find_in_range(addr, get_max_mapped(),
                                              nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == MEMBLOCK_ERROR) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        memblock_x86_reserve_range(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i)+1;
        return i;
}

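/*
 * Pick the memnode_shift for this node layout and build memnodemap[].
 * Returns the shift on success, or -1 if no suitable shift exists or the
 * map could not be allocated.
 */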
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
                shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned, you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE, "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

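/* Early pfn-to-node lookup, backed by the memnodemap[] hash table */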
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

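/*
 * Allocate `size' bytes for node `nodeid', preferring memory above the
 * DMA/DMA32 zones on that node; fall back to any mapped memory if the
 * node-local search fails.
 */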
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem;

        /*
         * Put the allocation as high as possible; other early
         * allocations will go alongside NODE_DATA.
         */
        if (start < (MAX_DMA_PFN<<PAGE_SHIFT))
                start = MAX_DMA_PFN<<PAGE_SHIFT;
        if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
            end > (MAX_DMA32_PFN<<PAGE_SHIFT))
                start = MAX_DMA32_PFN<<PAGE_SHIFT;
        mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        /* extend the search scope */
        end = max_pfn_mapped << PAGE_SHIFT;
        start = MAX_DMA_PFN << PAGE_SHIFT;
        mem = memblock_find_in_range(start, end, size, align);
        if (mem != MEMBLOCK_ERROR)
                return __va(mem);

        printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);

        return NULL;
}

/* Initialize bootmem allocator for a node */
void __init
setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, last_pfn, nodedata_phys;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        if (!end)
                return;

        /*
         * Don't confuse VM with a node that doesn't have the
         * minimum amount of memory:
         */
        if (end && (end - start) < NODE_MIN_SIZE)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        memblock_x86_reserve_range(nodedata_phys, nodedata_phys + pgdat_size, "NODE_DATA");
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
                nodedata_phys + pgdat_size - 1);
        nid = phys_to_nid(nodedata_phys);
        if (nid != nodeid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->node_id = nodeid;
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        node_set_online(nodeid);
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
static char *cmdline __initdata;

void __init numa_emu_cmdline(char *str)
{
        cmdline = str;
}

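/*
 * Build the physnodes[] map of real NUMA nodes from the ACPI SRAT or the
 * AMD northbridge info, clamp each node to [start, end), and return the
 * number of usable physical nodes.  If nothing was detected, fake a single
 * node covering the whole range.
 */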
static int __init setup_physnodes(unsigned long start, unsigned long end,
                                        int acpi, int amd)
{
        int ret = 0;
        int i;

        memset(physnodes, 0, sizeof(physnodes));
#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                acpi_get_nodes(physnodes, start, end);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                amd_get_nodes(physnodes);
#endif
        /*
         * Basic sanity checking on the physical node map: there may be errors
         * if the SRAT or AMD code incorrectly reported the topology or the mem=
         * kernel parameter is used.
         */
        for (i = 0; i < MAX_NUMNODES; i++) {
                if (physnodes[i].start == physnodes[i].end)
                        continue;
                if (physnodes[i].start > end) {
                        physnodes[i].end = physnodes[i].start;
                        continue;
                }
                if (physnodes[i].end < start) {
                        physnodes[i].start = physnodes[i].end;
                        continue;
                }
                if (physnodes[i].start < start)
                        physnodes[i].start = start;
                if (physnodes[i].end > end)
                        physnodes[i].end = end;
                ret++;
        }

        /*
         * If no physical topology was detected, a single node is faked to cover
         * the entire address space.
         */
        if (!ret) {
                physnodes[ret].start = start;
                physnodes[ret].end = end;
                ret = 1;
        }
        return ret;
}

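/*
 * Tell the ACPI or AMD code about the fake node layout so that
 * cpu-to-node assignments stay consistent with the detection method
 * actually used; with no real topology, bind every cpu to node 0.
 */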
static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
{
        int i;

        BUG_ON(acpi && amd);
#ifdef CONFIG_ACPI_NUMA
        if (acpi)
                acpi_fake_nodes(nodes, nr_nodes);
#endif
#ifdef CONFIG_AMD_NUMA
        if (amd)
                amd_fake_nodes(nodes, nr_nodes);
#endif
        if (!acpi && !amd)
                for (i = 0; i < nr_cpu_ids; i++)
                        numa_set_node(i, 0);
}

/*
 * Sets up nid to range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
{
        int ret = 0;
        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

/*
 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from
 * addr to max_addr.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 size;
        int big;
        int ret = 0;
        int i;

        if (nr_nodes <= 0)
                return -1;
        if (nr_nodes > MAX_NUMNODES) {
                pr_info("numa=fake=%d too large, reducing to %d\n",
                        nr_nodes, MAX_NUMNODES);
                nr_nodes = MAX_NUMNODES;
        }

        size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / nr_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the remainder.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * nr_nodes) /
                FAKE_NODE_MIN_SIZE;

        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                pr_err("Not enough memory for each node.  "
                        "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);

        /*
         * Continue to fill physical nodes with fake nodes until there is no
         * memory left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 end = physnodes[i].start + size;
                        u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);

                        if (ret < big)
                                end += FAKE_NODE_MIN_SIZE;

                        /*
                         * Continue to add memory to this fake node if its
                         * non-reserved memory is less than the per-node size.
                         */
                        while (end - physnodes[i].start -
                                memblock_x86_hole_size(physnodes[i].start, end) < size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > physnodes[i].end) {
                                        end = physnodes[i].end;
                                        break;
                                }
                        }

                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Avoid allocating more nodes than requested, which can
                         * happen as a result of rounding down each node's size
                         * to FAKE_NODE_MIN_SIZE.
                         */
                        if (nodes_weight(physnode_mask) + ret >= nr_nodes)
                                end = physnodes[i].end;

                        if (setup_node_range(ret++, &physnodes[i].start,
                                             end - physnodes[i].start,
                                             physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Returns the end address of a node so that there is at least `size' amount of
 * non-reserved memory or `max_addr' is reached.
 */
static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
{
        u64 end = start + size;

        while (end - start - memblock_x86_hole_size(start, end) < size) {
                end += FAKE_NODE_MIN_SIZE;
                if (end > max_addr) {
                        end = max_addr;
                        break;
                }
        }
        return end;
}

/*
 * Sets up fake nodes of `size' interleaved over physical nodes ranging from
 * `addr' to `max_addr'.  The return value is the number of nodes allocated.
 */
static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
{
        nodemask_t physnode_mask = NODE_MASK_NONE;
        u64 min_size;
        int ret = 0;
        int i;

        if (!size)
                return -1;
        /*
         * The limit on emulated nodes is MAX_NUMNODES, so the size per node is
         * increased accordingly if the requested size is too small.  This
         * creates a uniform distribution of node sizes across the entire
         * machine (but not necessarily over physical nodes).
         */
        min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
                                                MAX_NUMNODES;
        min_size = max(min_size, FAKE_NODE_MIN_SIZE);
        if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
                min_size = (min_size + FAKE_NODE_MIN_SIZE) &
                                                FAKE_NODE_MIN_HASH_MASK;
        if (size < min_size) {
                pr_err("Fake node size %LuMB too small, increasing to %LuMB\n",
                        size >> 20, min_size >> 20);
                size = min_size;
        }
        size &= FAKE_NODE_MIN_HASH_MASK;

        for (i = 0; i < MAX_NUMNODES; i++)
                if (physnodes[i].start != physnodes[i].end)
                        node_set(i, physnode_mask);
        /*
         * Fill physical nodes with fake nodes of size until there is no memory
         * left on any of them.
         */
        while (nodes_weight(physnode_mask)) {
                for_each_node_mask(i, physnode_mask) {
                        u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
                        u64 end;

                        end = find_end_of_node(physnodes[i].start,
                                                physnodes[i].end, size);
                        /*
                         * If there won't be at least FAKE_NODE_MIN_SIZE of
                         * non-reserved memory in ZONE_DMA32 for the next node,
                         * this one must extend to the boundary.
                         */
                        if (end < dma32_end && dma32_end - end -
                            memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
                                end = dma32_end;

                        /*
                         * If there won't be enough non-reserved memory for the
                         * next node, this one must extend to the end of the
                         * physical node.
                         */
                        if (physnodes[i].end - end -
                            memblock_x86_hole_size(end, physnodes[i].end) < size)
                                end = physnodes[i].end;

                        /*
                         * Setup the fake node that will be allocated as bootmem
                         * later.  If setup_node_range() returns non-zero, there
                         * is no more memory available on this physical node.
                         */
                        if (setup_node_range(ret++, &physnodes[i].start,
                                             end - physnodes[i].start,
                                             physnodes[i].end) < 0)
                                node_clear(i, physnode_mask);
                }
        }
        return ret;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn,
                        unsigned long last_pfn, int acpi, int amd)
{
        u64 addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes;
        int i;

        /*
         * If the numa=fake command-line contains a 'M' or 'G', it represents
         * the fixed node size.  Otherwise, if it is just a single number N,
         * split the system RAM into N fake nodes.
         */
        if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
                u64 size;

                size = memparse(cmdline, &cmdline);
                num_nodes = split_nodes_size_interleave(addr, max_addr, size);
        } else {
                unsigned long n;

                n = simple_strtoul(cmdline, NULL, 0);
                num_nodes = split_nodes_interleave(addr, max_addr, n);
        }

        if (num_nodes < 0)
                return num_nodes;
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered for
         * the e820 memory map.
         */
        remove_all_active_ranges();
        for_each_node_mask(i, node_possible_map)
                memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                                nodes[i].end >> PAGE_SHIFT);
        init_memory_mapping_high();
        for_each_node_mask(i, node_possible_map)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        setup_physnodes(addr, max_addr, acpi, amd);
        fake_physnodes(acpi, amd, num_nodes);
        numa_init_array();
        return 0;
}
#endif /* CONFIG_NUMA_EMU */

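/*
 * Fallback when neither ACPI nor AMD NUMA information is usable: pretend
 * all memory belongs to a single node 0.
 */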
static int dummy_numa_init(void)
{
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");
        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               0LU, max_pfn << PAGE_SHIFT);

        node_set(0, cpu_nodes_parsed);
        node_set(0, mem_nodes_parsed);

        return 0;
}

static int dummy_scan_nodes(void)
{
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        memblock_x86_register_active_regions(0, 0, max_pfn);
        init_memory_mapping_high();
        setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT);
        numa_init_array();

        return 0;
}

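/*
 * Try each NUMA detection method in priority order: ACPI (SRAT), then the
 * AMD northbridge scan, then the dummy single-node fallback.  Each slot in
 * numa_init[]/scan_nodes[] corresponds to one method; the first pair that
 * succeeds wins.
 */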
void __init initmem_init(void)
{
        int (*numa_init[])(void) = { [2] = dummy_numa_init };
        int (*scan_nodes[])(void) = { [2] = dummy_scan_nodes };
        int i, j;

        if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
                numa_init[0] = x86_acpi_numa_init;
                scan_nodes[0] = acpi_scan_nodes;
#endif
#ifdef CONFIG_AMD_NUMA
                numa_init[1] = amd_numa_init;
                scan_nodes[1] = amd_scan_nodes;
#endif
        }

        for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
                if (!numa_init[i])
                        continue;

                for (j = 0; j < MAX_LOCAL_APIC; j++)
                        set_apicid_to_node(j, NUMA_NO_NODE);

                nodes_clear(cpu_nodes_parsed);
                nodes_clear(mem_nodes_parsed);
                nodes_clear(node_possible_map);
                nodes_clear(node_online_map);

                if (numa_init[i]() < 0)
                        continue;
#ifdef CONFIG_NUMA_EMU
                setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1);
                if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
                        return;
                setup_physnodes(0, max_pfn << PAGE_SHIFT, i == 0, i == 1);
                nodes_clear(node_possible_map);
                nodes_clear(node_online_map);
#endif
                /* Account for nodes with cpus and no memory */
                nodes_or(node_possible_map, mem_nodes_parsed, cpu_nodes_parsed);
                if (WARN_ON(nodes_empty(node_possible_map)))
                        continue;

                if (!scan_nodes[i]())
                        return;
        }
        BUG();
}

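/* Release bootmem pages on every node; returns the number of pages freed. */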
unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        pages += free_all_memory_core_early(MAX_NUMNODES);

        return pages;
}

int __cpuinit numa_cpu_node(int cpu)
{
        int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

        if (apicid != BAD_APICID)
                return __apicid_to_node[apicid];
        return NUMA_NO_NODE;
}

/*
 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
 * of 64bit specific data structures.  The distinction is artificial and
 * should be removed.  numa_{add|remove}_cpu() are implemented in numa.c
 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled, but here when
 * it is enabled.
 *
 * NUMA emulation is planned to be made generic and the following and other
 * related code should be moved to numa.c.
 */
#ifdef CONFIG_NUMA_EMU
# ifndef CONFIG_DEBUG_PER_CPU_MAPS
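/*
 * Bind a cpu to every emulated node that was carved out of the physical
 * node backing that cpu.
 */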
void __cpuinit numa_add_cpu(int cpu)
{
        unsigned long addr;
        int physnid, nid;

        nid = numa_cpu_node(cpu);
        if (nid == NUMA_NO_NODE)
                nid = early_cpu_to_node(cpu);
        BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));

        /*
         * Use the starting address of the emulated node to find which physical
         * node it is allocated on.
         */
        addr = node_start_pfn(nid) << PAGE_SHIFT;
        for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
                if (addr >= physnodes[physnid].start &&
                    addr < physnodes[physnid].end)
                        break;

        /*
         * Map the cpu to each emulated node that is allocated on the physical
         * node of the cpu's apic id.
         */
        for_each_online_node(nid) {
                addr = node_start_pfn(nid) << PAGE_SHIFT;
                if (addr >= physnodes[physnid].start &&
                    addr < physnodes[physnid].end)
                        cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
        }
}

void __cpuinit numa_remove_cpu(int cpu)
{
        int i;

        for_each_online_node(i)
                cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
# else  /* !CONFIG_DEBUG_PER_CPU_MAPS */
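/*
 * Debug variant: route the update through debug_cpumask_set_cpu() so bogus
 * cpu/node combinations are reported, touching every emulated node that
 * lives on the cpu's physical node.
 */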
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        int i;

        if (node == NUMA_NO_NODE) {
                /* early_cpu_to_node() already emits a warning and trace */
                return;
        }
        for_each_online_node(i) {
                unsigned long addr;

                addr = node_start_pfn(i) << PAGE_SHIFT;
                if (addr < physnodes[node].start ||
                    addr >= physnodes[node].end)
                        continue;
                mask = debug_cpumask_set_cpu(cpu, enable);
                if (!mask)
                        return;

                if (enable)
                        cpumask_set_cpu(cpu, mask);
                else
                        cpumask_clear_cpu(cpu, mask);
        }
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}
# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
#endif  /* CONFIG_NUMA_EMU */