/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];

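/*
 * memnode carries the memnodemap[] table used by phys_to_nid(): a
 * physical address right-shifted by memnode_shift indexes memnodemap[]
 * to yield the owning node id.  compute_hash_shift() below picks the
 * shift and populates the table.
 */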
struct memnode memnode;

unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
unsigned char apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
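/*
 * Example: with shift == 24 each memnodemap[] entry covers 16MB of
 * physical address space, so a node spanning 0x1000000-0x3000000 owns
 * entries 1 and 2 of the table.
 */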
static int __init
populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
	int i;
	int res = -1;
	unsigned long addr, end;

	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48) {
		printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
		       nodemap_addr, nodemap_addr + nodemap_size);
		return 0;
	}

	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}

/*
 * The lowest set bit across all node start and end addresses gives the
 * maximum possible hash shift: any bigger shift would merge distinct
 * node boundaries into a single memnodemap[] entry.
 */
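/*
 * Example: nodes at 0-1GB and 1GB-2GB have all boundaries aligned to
 * 1GB, so the lowest set bit is bit 30 and each memnodemap[] entry can
 * cover a full 1GB.
 */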
static int __init
extract_lsb_from_nodes(const struct bootnode *nodes, int numnodes)
{
	int i;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start | end;
		if (end > memtop)
			memtop = end;
	}
	i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i) + 1;
	return i;
}

int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO
		       "Your memory is not aligned; rebuild your kernel "
		       "with a bigger NODEMAPSIZE, shift=%d\n", shift);
		return -1;
	}
	return shift;
}

#ifdef CONFIG_SPARSEMEM
int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}
#endif

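/*
 * Grab boot memory for a node: prefer a free e820 range inside the
 * node's own [start, end) window, and fall back to the generic bootmem
 * allocator (preferring memory above the 16MB ISA DMA region) if the
 * node itself has no room.
 */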
static void * __init
early_node_mem(int nodeid, unsigned long start, unsigned long end,
	       unsigned long size)
{
	unsigned long mem = find_e820_area(start, end, size);
	void *ptr;

	if (mem != -1L)
		return __va(mem);
	ptr = __alloc_bootmem_nopanic(size,
				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}

/* Initialize bootmem allocator for a node */
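/*
 * Normally the pg_data_t is allocated inside the node's own range and
 * the bootmem bitmap is sought on the next page boundary after it;
 * both are reserved from the freshly initialized allocator before the
 * node is marked online.
 */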
void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size, bootmap_start;
	unsigned long nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid], pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
			     bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}

/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/* Try to allocate the mem_map at the end of the node so it does
	   not fill up precious memory below 4GB. */
	memmapsize = sizeof(struct page) * (end_pfn - start_pfn);
	limit = end_pfn << PAGE_SHIFT;
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				     memmapsize, SMP_CACHE_BYTES,
				     round_down(limit - memmapsize, PAGE_SIZE),
				     limit);
#endif
}

void __init numa_init_array(void)
{
	int rr, i;

	/* There are unfortunately some poorly designed mainboards
	   around that only connect memory to a single CPU. This breaks
	   the 1:1 cpu->node mapping. To avoid this, fill in the mapping
	   for all possible CPUs, as the number of CPUs is not known
	   yet. We round-robin the existing nodes. */
	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

#ifdef CONFIG_NUMA_EMU
int numa_fake __initdata = 0;

/* NUMA emulation */
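/*
 * Split [start_pfn, end_pfn) into numa_fake equally sized fake nodes.
 * The node size is rounded down to a power of two so that a single
 * memnode_shift can describe the layout; the last node absorbs the
 * remainder.
 */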
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;
	struct bootnode nodes[MAX_NUMNODES];
	unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;

	/* Kludge needed for the hash function */
	if (hweight64(sz) > 1) {
		unsigned long x = 1;
		while ((x << 1) < sz)
			x <<= 1;
		if (x < sz/2)
			printk(KERN_ERR "NUMA emulation unbalanced. Complain to maintainer\n");
		sz = x;
	}

	memset(&nodes, 0, sizeof(nodes));
	for (i = 0; i < numa_fake; i++) {
		nodes[i].start = (start_pfn<<PAGE_SHIFT) + i*sz;
		if (i == numa_fake-1)
			sz = (end_pfn<<PAGE_SHIFT) - nodes[i].start;
		nodes[i].end = nodes[i].start + sz;
		printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n",
		       i,
		       nodes[i].start, nodes[i].end,
		       (nodes[i].end - nodes[i].start) >> 20);
		node_set_online(i);
	}
	memnode_shift = compute_hash_shift(nodes, numa_fake);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. Emulation disabled.\n");
		return -1;
	}
	for_each_online_node(i) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
					     nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	numa_init_array();
	return 0;
}
#endif

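/*
 * Node discovery order: NUMA emulation (if requested), then the ACPI
 * SRAT, then the AMD K8 northbridge registers; if all of these fail or
 * NUMA is off, fall back to a single dummy node covering all memory.
 */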
void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

#ifdef CONFIG_NUMA_EMU
	if (numa_fake && !numa_emulation(start_pfn, end_pfn))
		return;
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT))
		return;
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	node_to_cpumask[0] = cpumask_of_cpu(0);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}

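/* Keep the per-cpu PDA and the global cpu_to_node[] table in sync. */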
void __cpuinit numa_set_node(int cpu, int node)
{
	cpu_pda(cpu)->nodenumber = node;
	cpu_to_node[cpu] = node;
}

unsigned long __init numa_free_all_bootmem(void)
{
	int i;
	unsigned long pages = 0;

	for_each_online_node(i) {
		pages += free_all_bootmem_node(NODE_DATA(i));
	}
	return pages;
}

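/*
 * Tell the sparse memory model which page frame ranges each online
 * node owns before its mem_map sections are built.
 */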
#ifdef CONFIG_SPARSEMEM
static void __init arch_sparse_init(void)
{
	int i;

	for_each_online_node(i)
		memory_present(i, node_start_pfn(i), node_end_pfn(i));

	sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif

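/*
 * Set the zone boundaries (ZONE_DMA below 16MB, ZONE_DMA32 below 4GB,
 * ZONE_NORMAL up to end_pfn) and let the core build the zones for
 * every online node from the registered active regions.
 */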
void __init paging_init(void)
{
	int i;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	arch_sparse_init();

	for_each_online_node(i) {
		setup_node_zones(i);
	}

	free_area_init_nodes(max_zone_pfns);
}

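/*
 * Parse the "numa=" early boot option: "off" disables NUMA, "fake=N"
 * splits memory into N emulated nodes (CONFIG_NUMA_EMU), "noacpi"
 * ignores the ACPI SRAT, and "hotadd=N" limits hot-add memory
 * preallocation to N percent of available memory (CONFIG_ACPI_NUMA).
 */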
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5)) {
		numa_fake = simple_strtoul(opt+5, NULL, 0);
		if (numa_fake >= MAX_NUMNODES)
			numa_fake = MAX_NUMNODES;
	}
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}

early_param("numa", numa_setup);

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.  This means we
 * skip cpu_to_node[] initialisation for NUMA emulation and the
 * fake-node case (when running a kernel compiled for NUMA on a
 * non-NUMA box), which is fine: cpu_to_node[] was already initialized
 * in a round-robin manner by numa_init_array() before this call, and
 * that initialization is good enough for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u8 apicid = x86_cpu_to_apicid[i];

		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}

EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);

#ifdef CONFIG_DISCONTIGMEM
/*
 * Functions to convert PFNs from/to per-node page addresses.
 * These are out of line because they are quite big.
 * They could all be sped up by caching more state ahead of time;
 * that remains to be done.
 */

int pfn_valid(unsigned long pfn)
{
	unsigned nid;

	if (pfn >= num_physpages)
		return 0;
	nid = pfn_to_nid(pfn);
	if (nid == 0xff)
		return 0;
	return pfn >= node_start_pfn(nid) && pfn < node_end_pfn(nid);
}
EXPORT_SYMBOL(pfn_valid);
#endif