blob: 6da235522269e856fc000807888db8e345891a90 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Generic VM initialization for x86-64 NUMA setups.
3 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
4 */
5#include <linux/kernel.h>
6#include <linux/mm.h>
7#include <linux/string.h>
8#include <linux/init.h>
9#include <linux/bootmem.h>
10#include <linux/mmzone.h>
11#include <linux/ctype.h>
12#include <linux/module.h>
13#include <linux/nodemask.h>
14
15#include <asm/e820.h>
16#include <asm/proto.h>
17#include <asm/dma.h>
18#include <asm/numa.h>
19#include <asm/acpi.h>
20
21#ifndef Dprintk
22#define Dprintk(x...)
23#endif
24
/* Per-node pglist_data pointers, filled in by setup_node_bootmem(). */
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
/* Per-node bootmem allocator state, hooked up as NODE_DATA(nid)->bdata. */
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

/* Physical-address -> node lookup: hash shift plus the memnodemap table. */
struct memnode memnode;

/* CPU -> node table; every CPU starts with no node until detection runs. */
unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
/* Local APIC id -> node, filled in by SRAT/K8 scanning (init-time only). */
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
/* Reverse mapping: which CPUs sit on each node. */
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

/* Set by "numa=off" on the command line. */
int numa_off __initdata;
/* Physical address/size of the memnodemap allocated from e820, if any. */
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;
Eric Dumazet529a3402005-11-05 17:25:54 +010042
/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
	int i;
	int res = -1;	/* stays -1 if no node contributes any memory */
	unsigned long addr, end;

	/* 0xff marks "no node" (see pfn_valid()'s nid check). */
	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;	/* skip empty nodes */
		/* Node extends past the table for this shift. */
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			/* Slot already claimed: two nodes collide at this
			   granularity, so the shift is too big. */
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
75
/*
 * Pick storage for memnodemap[]: the small embedded array inside struct
 * memnode when it fits, otherwise a cache-line-aligned chunk carved out
 * of e820 memory.  Returns 0 on success, -1 if no memory could be found.
 */
static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	/* Small maps live in the structure itself; nothing to allocate. */
	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48)
		return 0;

	/* Over-allocate by one cache line so the start can be aligned. */
	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;	/* start the e820 search above low memory */
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	/* Round the allocated start up to an L1 cache-line boundary. */
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
102
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 * NOTE(review): only the start addresses are OR'd into the bitfield below,
 * despite the comment also mentioning end addresses — confirm intent.
 */
static int __init
extract_lsb_from_nodes (const struct bootnode *nodes, int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;	/* ignore empty nodes */
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;	/* track the highest address */
	}
	/* With 0 or 1 populated nodes any shift works; use the maximum. */
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	/* Table must cover [0, memtop] at this granularity. */
	memnodemapsize = (memtop >> i)+1;
	return i;
}
131
/*
 * Compute the memnode hash shift for the given node layout, allocate the
 * map, and populate it.  Returns the shift on success or -1 on failure
 * (map allocation failed, or nodes are not aligned to the chosen shift).
 */
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
		shift);

	/* populate_memnodemap() returns 1 only when every node mapped. */
	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO
	"Your memory is not aligned you need to rebuild your kernel "
	"with a bigger NODEMAPSIZE shift=%d\n",
			shift);
		return -1;
	}
	return shift;
}
151
#ifdef CONFIG_SPARSEMEM
/* Early pfn -> node lookup through the memnode hash (boot-time use). */
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif
158
Andi Kleena8062232006-04-07 19:49:21 +0200159static void * __init
160early_node_mem(int nodeid, unsigned long start, unsigned long end,
161 unsigned long size)
162{
163 unsigned long mem = find_e820_area(start, end, size);
164 void *ptr;
165 if (mem != -1L)
166 return __va(mem);
167 ptr = __alloc_bootmem_nopanic(size,
168 SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
169 if (ptr == 0) {
170 printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
171 size, nodeid);
172 return NULL;
173 }
174 return ptr;
175}
176
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
	unsigned long nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/* Place the pg_data_t itself, preferably inside the node's range. */
	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		/* Undo the pg_data_t allocation, but only if it came from
		   the fallback allocator (i.e. lies outside this node). */
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid],pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	/* Mark the node's usable RAM free, then re-reserve what we used. */
	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}
229
/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/* Try to allocate mem_map at end to not fill up precious <4GB
	   memory. */
	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
	limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* Search window [limit - memmapsize, limit), i.e. node's top end. */
	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				memmapsize, SMP_CACHE_BYTES,
				round_down(limit - memmapsize, PAGE_SIZE),
				limit);
#endif
}
253
254void __init numa_init_array(void)
255{
256 int rr, i;
257 /* There are unfortunately some poorly designed mainboards around
258 that only connect memory to a single CPU. This breaks the 1:1 cpu->node
259 mapping. To avoid this fill in the mapping for all possible
260 CPUs, as the number of CPUs is not known yet.
261 We round robin the existing nodes. */
Ravikiran G Thirumalai85cc5132005-09-30 11:59:22 -0700262 rr = first_node(node_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 for (i = 0; i < NR_CPUS; i++) {
264 if (cpu_to_node[i] != NUMA_NO_NODE)
265 continue;
Andi Kleen69d81fc2005-11-05 17:25:53 +0100266 numa_set_node(i, rr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267 rr = next_node(rr, node_online_map);
268 if (rr == MAX_NUMNODES)
269 rr = first_node(node_online_map);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270 }
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272}
273
#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
char *cmdline __initdata;	/* value of the numa=fake= boot option */

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is 0
 * if there is additional memory left for allocation past addr and -1 otherwise.
 * addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		/* Clamp to the top of RAM; no memory left past this node. */
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}
301
/*
 * Splits num_nodes nodes up equally starting at node_start. The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start,
				      int num_nodes)
{
	unsigned int big;
	u64 size;
	int i;

	if (num_nodes <= 0)
		return -1;
	if (num_nodes > MAX_NUMNODES)
		num_nodes = MAX_NUMNODES;
	/* Per-node share of usable (non-hole) memory. */
	size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
	       num_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the leftovers.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node. "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;
		/* The first 'big' nodes absorb the leftover memory. */
		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM. Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			/* Grow until the node has 'size' of real memory,
			   skipping over e820 holes. */
			while (end - *addr - e820_hole_size(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}
360
/*
 * Splits the remaining system RAM into chunks of size. The remaining memory is
 * always assigned to a final node and can be asymmetric. Returns the number of
 * nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;
	/* size is given in MB; align down to the minimum fake-node size. */
	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	/* Keep carving nodes until setup_node_range() reports RAM is gone. */
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}
375
/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0;
	int coeff_flag;		/* saw a '*' in the current term */
	int coeff = -1;		/* node-count multiplier before the '*' */
	int num = 0;		/* number currently being parsed */
	u64 size;
	int i;

	memset(&nodes, 0, sizeof(nodes));
	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0,
						simple_strtol(cmdline, NULL, 0));
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line: comma-separated [coeff*]size terms. */
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;	/* bare size: one node */
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line coefficients are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;	/* start parsing the next number */
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		/* Decide based on the last command-line character parsed. */
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
	 * true. NUMA emulation has succeeded so we will not scan ACPI nodes.
	 */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa = -1;
#endif
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	acpi_fake_nodes(nodes, num_nodes);
	numa_init_array();
	return 0;
}
#endif /* CONFIG_NUMA_EMU */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493
/*
 * Top-level NUMA setup: try emulation, then ACPI SRAT, then K8 northbridge
 * scanning; if all fail (or numa=off), fall back to one node covering all
 * of memory.  node_possible_map is re-cleared after every failed attempt.
 */
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
537
/* Add a freshly onlined CPU to its node's CPU mask. */
__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}
542
/* Record cpu -> node in both the per-cpu PDA and the global table. */
void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node[cpu] = node;
}
548
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549unsigned long __init numa_free_all_bootmem(void)
550{
551 int i;
552 unsigned long pages = 0;
553 for_each_online_node(i) {
554 pages += free_all_bootmem_node(NODE_DATA(i));
555 }
556 return pages;
557}
558
/*
 * Set per-zone pfn limits, announce memory to sparsemem, allocate each
 * node's mem_map, and initialize all free areas.
 */
void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}
577
/*
 * Parse the "numa=" early boot parameter.
 * Recognized values: off, fake=<layout>, noacpi, hotadd=<percent>.
 */
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt,"off",3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;	/* consumed later by numa_emulation() */
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt,"noacpi",6))
		acpi_numa = -1;
	if (!strncmp(opt,"hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}

early_param("numa", numa_setup);
598
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[],
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and faking node case (when running a kernel compiled
 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array,
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;
	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid[i];
		if (apicid == BAD_APICID)
			continue;	/* CPU not present */
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;	/* no SRAT/K8 info for this APIC */
		numa_set_node(i,apicid_to_node[apicid]);
	}
}
623
/* Export the topology tables for modules. */
EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);
Andi Kleencf050132006-01-11 22:46:27 +0100628
629#ifdef CONFIG_DISCONTIGMEM
630/*
631 * Functions to convert PFNs from/to per node page addresses.
632 * These are out of line because they are quite big.
633 * They could be all tuned by pre caching more state.
634 * Should do that.
635 */
636
Andi Kleencf050132006-01-11 22:46:27 +0100637int pfn_valid(unsigned long pfn)
638{
639 unsigned nid;
640 if (pfn >= num_physpages)
641 return 0;
642 nid = pfn_to_nid(pfn);
643 if (nid == 0xff)
644 return 0;
645 return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
646}
647EXPORT_SYMBOL(pfn_valid);
648#endif