/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

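/*
 * memnode bundles memnode_shift and memnodemap[]: phys_to_nid() maps a
 * physical address to its node via memnodemap[addr >> memnode_shift].
 */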
struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
        int i;
        int res = -1;
        unsigned long addr, end;

        if (shift >= 64)
                return -1;
        memset(memnodemap, 0xff, sizeof(memnodemap));
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= NODEMAPSIZE)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != 0xff)
                                return -1;
                        memnodemap[addr >> shift] = i;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

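/*
 * Walk up from shift 20 to the largest shift at which all nodes still map
 * to distinct memnodemap[] entries, then verify the map actually fits
 * (populate_memnodemap() returns 1).  On success the caller installs the
 * result as memnode_shift for the phys_to_nid() hash.
 */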
int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
        int shift = 20;

        while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0)
                shift++;

        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

        if (populate_memnodemap(nodes, numnodes, shift) != 1) {
                printk(KERN_INFO
                       "Your memory is not aligned you need to rebuild your kernel "
                       "with a bigger NODEMAPSIZE shift=%d\n", shift);
                return -1;
        }
        return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
        unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
        unsigned long nodedata_phys;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

        start = round_up(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        nodedata_phys = find_e820_area(start, end, pgdat_size);
        if (nodedata_phys == -1L)
                panic("Cannot find memory pgdat in node %d\n", nodeid);

        Dprintk("nodedata_phys %lx\n", nodedata_phys);

        node_data[nodeid] = phys_to_virt(nodedata_phys);
        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

        /* Find a place for the bootmem map */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
        bootmap_start = find_e820_area(bootmap_start, end, bootmap_pages<<PAGE_SHIFT);
        if (bootmap_start == -1L)
                panic("Not enough continuous space for bootmap on node %d", nodeid);
        Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, end_pfn);

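        /* Register all usable e820 RAM in this range with the node's bootmem
           allocator, then reserve the pg_data_t and the bootmap itself so
           they are never handed out. */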
        e820_bootmem_free(NODE_DATA(nodeid), start, end);

        reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
        reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start, bootmap_pages<<PAGE_SHIFT);
        node_set_online(nodeid);
}

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
        unsigned long start_pfn, end_pfn, memmapsize, limit;
        unsigned long zones[MAX_NR_ZONES];
        unsigned long holes[MAX_NR_ZONES];

        start_pfn = node_start_pfn(nodeid);
        end_pfn = node_end_pfn(nodeid);

        Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
                nodeid, start_pfn, end_pfn);

        /* Try to allocate mem_map at end to not fill up precious <4GB
           memory. */
        memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
        limit = end_pfn << PAGE_SHIFT;
        NODE_DATA(nodeid)->node_mem_map =
                __alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
                                     memmapsize, SMP_CACHE_BYTES,
                                     round_down(limit - memmapsize, PAGE_SIZE),
                                     limit);

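        /* Compute the zone sizes and holes for this node and hand them to
           the core VM to set up the node's zones and free lists. */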
        size_zones(zones, holes, start_pfn, end_pfn);
        free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
                            start_pfn, holes);
}

void __init numa_init_array(void)
{
        int rr, i;
        /* There are unfortunately some poorly designed mainboards around
           that only connect memory to a single CPU. This breaks the 1:1
           cpu->node mapping. To work around it, fill in the mapping for all
           possible CPUs, since the number of CPUs is not known yet.
           We round robin the existing nodes. */
        rr = first_node(node_online_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node[i] != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

#ifdef CONFIG_NUMA_EMU
int numa_fake __initdata = 0;

/* Numa emulation */
static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        struct bootnode nodes[MAX_NUMNODES];
        unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;

        /* Kludge needed for the hash function: round the node size down to a
           power of two so the fake node boundaries stay aligned for
           compute_hash_shift(). */
        if (hweight64(sz) > 1) {
                unsigned long x = 1;
                while ((x << 1) < sz)
                        x <<= 1;
                if (x < sz/2)
                        printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
                sz = x;
        }

        memset(&nodes, 0, sizeof(nodes));
        for (i = 0; i < numa_fake; i++) {
                nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
                if (i == numa_fake-1)
                        sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
                nodes[i].end = nodes[i].start + sz;
                printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
                       i,
                       nodes[i].start, nodes[i].end,
                       (nodes[i].end - nodes[i].start) >> 20);
                node_set_online(i);
        }
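        /* Build the physical-address -> node hash for the fake layout; if no
           usable shift exists, give up on emulation. */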
        memnode_shift = compute_hash_shift(nodes, numa_fake);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
                return -1;
        }
        for_each_online_node(i)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        numa_init_array();
        return 0;
}
#endif

void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;

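        /* Try the NUMA discovery methods in order: explicit emulation, the
           ACPI SRAT, then the K8 northbridge registers.  If all of them fail
           (or NUMA is off), fall back to a single node spanning all memory. */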
#ifdef CONFIG_NUMA_EMU
        if (numa_fake && !numa_emulation(start_pfn, end_pfn))
                return;
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          end_pfn << PAGE_SHIFT))
                return;
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
                return;
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               end_pfn << PAGE_SHIFT);

        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap[0] = 0;
        nodes_clear(node_online_map);
        node_set_online(0);
        for (i = 0; i < NR_CPUS; i++)
                numa_set_node(i, 0);
        node_to_cpumask[0] = cpumask_of_cpu(0);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
        set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

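/* Record the CPU's node in both its per-CPU PDA and the cpu_to_node[] table. */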
void __cpuinit numa_set_node(int cpu, int node)
{
        cpu_pda(cpu)->nodenumber = node;
        cpu_to_node[cpu] = node;
}

unsigned long __init numa_free_all_bootmem(void)
{
        int i;
        unsigned long pages = 0;
        for_each_online_node(i) {
                pages += free_all_bootmem_node(NODE_DATA(i));
        }
        return pages;
}

#ifdef CONFIG_SPARSEMEM
static void __init arch_sparse_init(void)
{
        int i;

        for_each_online_node(i)
                memory_present(i, node_start_pfn(i), node_end_pfn(i));

        sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif

void __init paging_init(void)
{
        int i;

        arch_sparse_init();

        for_each_online_node(i) {
                setup_node_zones(i);
        }
}

/*
 * Parse the "numa=" early boot option:
 *   numa=off      disable NUMA
 *   numa=fake=<N> fake N nodes (CONFIG_NUMA_EMU)
 *   numa=noacpi   ignore the ACPI SRAT (CONFIG_ACPI_NUMA)
 */
__init int numa_setup(char *opt)
{
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5)) {
                numa_fake = simple_strtoul(opt + 5, NULL, 0);
                if (numa_fake >= MAX_NUMNODES)
                        numa_fake = MAX_NUMNODES;
        }
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
#endif
        return 1;
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if both the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA emulation
 * and the fake-node case (a NUMA kernel running on a non-NUMA box),
 * which is fine: cpu_to_node[] was already filled in round-robin
 * fashion by numa_init_array() before this call, and that is good
 * enough for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
        int i;
        for (i = 0; i < NR_CPUS; i++) {
                u8 apicid = x86_cpu_to_apicid[i];
                if (apicid == BAD_APICID)
                        continue;
                if (apicid_to_node[apicid] == NUMA_NO_NODE)
                        continue;
                numa_set_node(i, apicid_to_node[apicid]);
        }
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per node page addresses.
 * These are out of line because they are quite big.
 * They could all be tuned by pre-caching more state; should do that.
 */

/* Requires pfn_valid(pfn) to be true */
struct page *pfn_to_page(unsigned long pfn)
{
        int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT);
        return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;
}
EXPORT_SYMBOL(pfn_to_page);

unsigned long page_to_pfn(struct page *page)
{
        return (long)(((page) - page_zone(page)->zone_mem_map) +
                      page_zone(page)->zone_start_pfn);
}
EXPORT_SYMBOL(page_to_pfn);

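/*
 * A pfn is valid if it lies below num_physpages and inside the span of the
 * node that memnodemap[] assigns it to (0xff means no node covers it).
 */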
int pfn_valid(unsigned long pfn)
{
        unsigned nid;
        if (pfn >= num_physpages)
                return 0;
        nid = pfn_to_nid(pfn);
        if (nid == 0xff)
                return 0;
        return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif