blob: f8c04d6935c922725d64ddf97178826f1b3e27cd [file] [log] [blame]
/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */
11
12#include <linux/kernel.h>
13#include <linux/acpi.h>
14#include <linux/mmzone.h>
15#include <linux/bitmap.h>
16#include <linux/module.h>
17#include <linux/topology.h>
Andi Kleen68a3a7f2006-04-07 19:49:18 +020018#include <linux/bootmem.h>
19#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <asm/proto.h>
21#include <asm/numa.h>
Andi Kleen8a6fdd32006-01-11 22:44:39 +010022#include <asm/e820.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023
/* SRAT parse state: 0 = not seen, 1 = in use (set on a valid LAPIC
   affinity entry), -1 = rejected by bad_srat(). */
int acpi_numa __initdata;

/* Only pre-reserve hot-add areas at boot when ACPI memory hotplug is
   configured but real CONFIG_MEMORY_HOTPLUG support is not. */
#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
	defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
		&& !defined(CONFIG_MEMORY_HOTPLUG)
#define RESERVE_HOTADD 1
#endif

/* SLIT kept for __node_distance(); stays NULL if the table was invalid. */
static struct acpi_table_slit *acpi_slit;

/* Nodes that had at least one SRAT memory affinity entry. */
static nodemask_t nodes_parsed __initdata;
/* Memory range parsed from the SRAT, indexed by node id. */
static struct bootnode nodes[MAX_NUMNODES] __initdata;
/* Hot-pluggable range accepted by reserve_hotadd(), per node id. */
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
/* Nonzero once a hotadd area was accepted; cutoff_node() then refuses
   to trim node ranges. */
static int found_add_area __initdata;
/* Percentage of present RAM allowed to be spent on hot-add mem_map
   pre-allocation (see hotadd_enough_memory()). */
int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0	/* Ignore all settings */
#endif

/* Too small nodes confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static __init int setup_node(int pxm)
48{
Yasunori Goto762834e2006-06-23 02:03:19 -070049 return acpi_map_pxm_to_node(pxm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070050}
51
52static __init int conflicting_nodes(unsigned long start, unsigned long end)
53{
54 int i;
Andi Kleen4b6a4552005-09-12 18:49:25 +020055 for_each_node_mask(i, nodes_parsed) {
Andi Kleenabe059e2006-03-25 16:29:12 +010056 struct bootnode *nd = &nodes[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -070057 if (nd->start == nd->end)
58 continue;
59 if (nd->end > start && nd->start < end)
Andi Kleen05d1fa42005-09-12 18:49:24 +020060 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070061 if (nd->end == end && nd->start == start)
Andi Kleen05d1fa42005-09-12 18:49:24 +020062 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070063 }
64 return -1;
65}
66
67static __init void cutoff_node(int i, unsigned long start, unsigned long end)
68{
Andi Kleenabe059e2006-03-25 16:29:12 +010069 struct bootnode *nd = &nodes[i];
Andi Kleen68a3a7f2006-04-07 19:49:18 +020070
71 if (found_add_area)
72 return;
73
Linus Torvalds1da177e2005-04-16 15:20:36 -070074 if (nd->start < start) {
75 nd->start = start;
76 if (nd->end < nd->start)
77 nd->start = nd->end;
78 }
79 if (nd->end > end) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 nd->end = end;
81 if (nd->start > nd->end)
82 nd->start = nd->end;
83 }
84}
85
86static __init void bad_srat(void)
87{
Andi Kleen2bce2b52005-09-12 18:49:25 +020088 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070089 printk(KERN_ERR "SRAT: SRAT not used.\n");
90 acpi_numa = -1;
Andi Kleenfad79062006-05-15 18:19:44 +020091 found_add_area = 0;
Andi Kleen2bce2b52005-09-12 18:49:25 +020092 for (i = 0; i < MAX_LOCAL_APIC; i++)
93 apicid_to_node[i] = NUMA_NO_NODE;
Andi Kleen68a3a7f2006-04-07 19:49:18 +020094 for (i = 0; i < MAX_NUMNODES; i++)
95 nodes_add[i].start = nodes[i].end = 0;
Mel Gorman5cb248a2006-09-27 01:49:52 -070096 remove_all_active_ranges();
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static __init inline int srat_disabled(void)
100{
101 return numa_off || acpi_numa < 0;
102}
103
Andi Kleen1584b892006-01-11 22:43:42 +0100104/*
105 * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
106 * up the NUMA heuristics which wants the local node to have a smaller
107 * distance than the others.
108 * Do some quick checks here and only use the SLIT if it passes.
109 */
110static __init int slit_valid(struct acpi_table_slit *slit)
111{
112 int i, j;
113 int d = slit->localities;
114 for (i = 0; i < d; i++) {
115 for (j = 0; j < d; j++) {
116 u8 val = slit->entry[d*i + j];
117 if (i == j) {
118 if (val != 10)
119 return 0;
120 } else if (val <= 10)
121 return 0;
122 }
123 }
124 return 1;
125}
126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127/* Callback for SLIT parsing */
128void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
129{
Andi Kleen1584b892006-01-11 22:43:42 +0100130 if (!slit_valid(slit)) {
131 printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
132 return;
133 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134 acpi_slit = slit;
135}
136
/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
	int pxm, node;
	/* Nothing to do if NUMA is off or the SRAT was already rejected. */
	if (srat_disabled())
		return;
	/* A malformed entry invalidates the whole table. */
	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
		bad_srat();
		return;
	}
	/* Ignore entries the firmware marked as disabled. */
	if (pa->flags.enabled == 0)
		return;
	pxm = pa->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}
	/* Record the APIC->node mapping and mark the SRAT as usable. */
	apicid_to_node[pa->apic_id] = node;
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
	       pxm, pa->apic_id, node);
}
162
#ifdef RESERVE_HOTADD
/*
 * Protect against too large hotadd areas that would fill up memory.
 * Limits the total mem_map space pre-allocated for hot-add areas to
 * hotadd_percent of present RAM; shrinks nd->end when the budget runs
 * short and verifies e820 has room for the struct page array.
 * Returns 1 on success, 0 when the area must be rejected.
 */
static int hotadd_enough_memory(struct bootnode *nd)
{
	/* Running totals across all calls (one contiguous budget). */
	static unsigned long allocated;
	static unsigned long last_area_end;
	unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
	long mem = pages * sizeof(struct page);
	unsigned long addr;
	unsigned long allowed;
	unsigned long oldpages = pages;

	/* Signed overflow of the byte count means the area is absurdly big. */
	if (mem < 0)
		return 0;
	/* Budget = hotadd_percent% of present (non-hole) RAM in bytes. */
	allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
	allowed = (allowed / 100) * hotadd_percent;
	if (allocated + mem > allowed) {
		unsigned long range;
		/* Give them at least part of their hotadd memory upto hotadd_percent
		   It would be better to spread the limit out
		   over multiple hotplug areas, but that is too complicated
		   right now */
		if (allocated >= allowed)
			return 0;
		/* Shrink the area to whatever budget remains. */
		range = allowed - allocated;
		pages = (range / PAGE_SIZE);
		mem = pages * sizeof(struct page);
		nd->end = nd->start + range;
	}
	/* Not completely fool proof, but a good sanity check */
	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
	if (addr == -1UL)
		return 0;
	if (pages != oldpages)
		printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
			pages << PAGE_SHIFT);
	last_area_end = addr + mem;
	allocated += mem;
	return 1;
}
205
/*
 * It is fine to add this area to the nodes data it will be used later
 * This code supports one contiguous hot add area per node.
 * Returns 0 on success, -1 when the area is rejected (caller undoes
 * any node-range damage).
 */
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return -1;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	/* A hot-add area must be entirely absent from e820 today. */
	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR
			"SRAT: Hotplug area %lu -> %lu has existing memory\n",
			s_pfn, e_pfn);
		return -1;
	}

	/* NOTE(review): this passes the PRE-merge nodes_add[node] range,
	   not [start, end); on the first area for a node that range is
	   empty, so the budget check sees a zero-sized area — confirm
	   this is intended. */
	if (!hotadd_enough_memory(&nodes_add[node])) {
		printk(KERN_ERR "SRAT: Hotplug area too large\n");
		return -1;
	}

	/* Looks good */

	found_add_area = 1;
	/* Merge [start, end) into the node's single contiguous add-area:
	   initialize it, or extend it at either edge. */
	if (nd->start == nd->end) {
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
	}

	/* Grow end_pfn so the hot-add space is covered by the mem_map. */
	if ((nd->end >> PAGE_SHIFT) > end_pfn)
		end_pfn = nd->end >> PAGE_SHIFT;

	if (changed)
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
	return 0;
}
#endif
269
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	/* A malformed entry invalidates the whole table. */
	if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
		bad_srat();
		return;
	}
	if (ma->flags.enabled == 0)
		return;
	/* Skip hot-pluggable areas when no hot-add budget is configured. */
	if (ma->flags.hot_pluggable && hotadd_percent == 0)
		return;
	/* Assemble the 64-bit range from the split lo/hi table fields. */
	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	i = conflicting_nodes(start, end);
	if (i == node) {
		/* Overlap with the node's own earlier entries is tolerated. */
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		/* Overlap with a different node means the SRAT is broken. */
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
			   nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	/* Snapshot so a failed hotadd reservation can be rolled back below. */
	oldnode = *nd;
	if (!node_test_and_set(node, nodes_parsed)) {
		/* First entry for this node defines the range. */
		nd->start = start;
		nd->end = end;
	} else {
		/* Later entries only widen the existing range. */
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
	       nd->start, nd->end);
	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
						nd->end >> PAGE_SHIFT);
	push_node_boundaries(node, nd->start >> PAGE_SHIFT,
						nd->end >> PAGE_SHIFT);

#ifdef RESERVE_HOTADD
	if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
		/* Ignore hotadd region. Undo damage */
		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
		*nd = oldnode;
		/* If the node never had a real range, un-parse it entirely. */
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}
#endif
}
340
/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory.
   Returns 1 when the parsed nodes account for (almost) all e820 RAM. */
static int nodes_cover_memory(void)
{
	int i;
	unsigned long pxmram, e820ram;

	/* Sum the present pages claimed by every parsed node. */
	pxmram = 0;
	for_each_node_mask(i, nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		pxmram += e - s;
		pxmram -= absent_pages_in_range(s, e);
		/* Hot-add space is not backed by RAM yet; don't count it.
		   NOTE(review): this subtracts a BYTE length from a PAGE
		   count — units look inconsistent; confirm against later
		   kernels. */
		pxmram -= nodes_add[i].end - nodes_add[i].start;
		/* Guard against unsigned underflow from the subtractions. */
		if ((long)pxmram < 0)
			pxmram = 0;
	}

	e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
	/* We seem to lose 3 pages somewhere. Allow a bit of slack. */
	if ((long)(e820ram - pxmram) >= 1*1024*1024) {
		printk(KERN_ERR
	"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(pxmram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}
370
Andi Kleen9391a3f2006-02-03 21:51:17 +0100371static void unparse_node(int node)
372{
373 int i;
374 node_clear(node, nodes_parsed);
375 for (i = 0; i < MAX_LOCAL_APIC; i++) {
376 if (apicid_to_node[i] == node)
377 apicid_to_node[i] = NUMA_NO_NODE;
378 }
379}
380
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381void __init acpi_numa_arch_fixup(void) {}
382
/* Use the information discovered above to actually set up the nodes.
   Trims node ranges to [start, end), drops undersized nodes, validates
   coverage, builds the memnode hash, and registers bootmem per node.
   Returns 0 on success, -1 when SRAT-based NUMA cannot be used. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++) {
		cutoff_node(i, start, end);
		/* Tiny nodes (usually BIOS bugs) are dropped entirely. */
		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
			unparse_node(i);
			node_set_offline(i);
		}
	}

	/* No valid LAPIC affinity entry was ever seen. */
	if (acpi_numa <= 0)
		return -1;

	if (!nodes_cover_memory()) {
		bad_srat();
		return -1;
	}

	/* Build the address->node lookup hash. */
	memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	/* Finally register nodes */
	for_each_node_mask(i, nodes_parsed)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, nodes_parsed)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	/* Detach CPUs whose node did not survive parsing. */
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] == NUMA_NO_NODE)
			continue;
		if (!node_isset(cpu_to_node[i], nodes_parsed))
			numa_set_node(i, NUMA_NO_NODE);
	}
	numa_init_array();
	return 0;
}
431
/* Reserve the bootmem backing a node's accepted hot-add area so nothing
   else allocates from it; prints the mem_map cost of that range. */
void __init srat_reserve_add_area(int nodeid)
{
	if (found_add_area && nodes_add[nodeid].end) {
		u64 total_mb;

		printk(KERN_INFO "SRAT: Reserving hot-add memory space "
				"for node %d at %Lx-%Lx\n",
			nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
		/* pages in the range ... */
		total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
					>> PAGE_SHIFT;
		/* ... times the per-page mem_map entry size, scaled to MB. */
		total_mb *= sizeof(struct page);
		total_mb >>= 20;
		printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
				"pre-allocated memory.\n", (unsigned long long)total_mb);
		reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
			       nodes_add[nodeid].end - nodes_add[nodeid].start);
	}
}
450
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451int __node_distance(int a, int b)
452{
453 int index;
454
455 if (!acpi_slit)
456 return a == b ? 10 : 20;
457 index = acpi_slit->localities * node_to_pxm(a);
458 return acpi_slit->entry[index + node_to_pxm(b)];
459}
460
461EXPORT_SYMBOL(__node_distance);