/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>

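/* 1 once usable SRAT processor info has been parsed, -1 after an SRAT error. */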
int acpi_numa __initdata;

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES];
static int found_add_area __initdata;
int hotadd_percent __initdata = 0;

/* Too small nodes confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)

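/* Translate an ACPI proximity domain number into a logical node id. */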
static __init int setup_node(int pxm)
{
        return acpi_map_pxm_to_node(pxm);
}

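/*
 * Check whether [start, end) overlaps any node parsed so far.
 * Returns the conflicting node id, or -1 if there is no overlap.
 */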
static __init int conflicting_nodes(unsigned long start, unsigned long end)
{
        int i;
        for_each_node_mask(i, nodes_parsed) {
                struct bootnode *nd = &nodes[i];
                if (nd->start == nd->end)
                        continue;
                if (nd->end > start && nd->start < end)
                        return i;
                if (nd->end == end && nd->start == start)
                        return i;
        }
        return -1;
}

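/* Clamp node i's range to [start, end), unless a hot-add area was found. */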
| 58 | static __init void cutoff_node(int i, unsigned long start, unsigned long end) |
| 59 | { |
Andi Kleen | abe059e | 2006-03-25 16:29:12 +0100 | [diff] [blame] | 60 | struct bootnode *nd = &nodes[i]; |
Andi Kleen | 68a3a7f | 2006-04-07 19:49:18 +0200 | [diff] [blame] | 61 | |
| 62 | if (found_add_area) |
| 63 | return; |
| 64 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 65 | if (nd->start < start) { |
| 66 | nd->start = start; |
| 67 | if (nd->end < nd->start) |
| 68 | nd->start = nd->end; |
| 69 | } |
| 70 | if (nd->end > end) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 71 | nd->end = end; |
| 72 | if (nd->start > nd->end) |
| 73 | nd->start = nd->end; |
| 74 | } |
| 75 | } |
| 76 | |
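/* Discard all SRAT state after an inconsistency and fall back to no NUMA. */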
static __init void bad_srat(void)
{
        int i;
        printk(KERN_ERR "SRAT: SRAT not used.\n");
        acpi_numa = -1;
        found_add_area = 0;
        for (i = 0; i < MAX_LOCAL_APIC; i++)
                apicid_to_node[i] = NUMA_NO_NODE;
        for (i = 0; i < MAX_NUMNODES; i++) {
                nodes[i].start = nodes[i].end = 0;
                nodes_add[i].start = nodes_add[i].end = 0;
        }
        remove_all_active_ranges();
}

static __init inline int srat_disabled(void)
{
        return numa_off || acpi_numa < 0;
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static __init int slit_valid(struct acpi_table_slit *slit)
{
        int i, j;
        int d = slit->localities;
        for (i = 0; i < d; i++) {
                for (j = 0; j < d; j++) {
                        u8 val = slit->entry[d*i + j];
                        if (i == j) {
                                if (val != 10)
                                        return 0;
                        } else if (val <= 10)
                                return 0;
                }
        }
        return 1;
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
        if (!slit_valid(slit)) {
                printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
                return;
        }
        acpi_slit = slit;
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
        int pxm, node;
        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
                bad_srat();
                return;
        }
        if (pa->flags.enabled == 0)
                return;
        pxm = pa->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }
        apicid_to_node[pa->apic_id] = node;
        acpi_numa = 1;
        printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
               pxm, pa->apic_id, node);
}

#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Protect against too large hotadd areas that would fill up memory.
 */
static int hotadd_enough_memory(struct bootnode *nd)
{
        static unsigned long allocated;
        static unsigned long last_area_end;
        unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
        long mem = pages * sizeof(struct page);
        unsigned long addr;
        unsigned long allowed;
        unsigned long oldpages = pages;

        if (mem < 0)
                return 0;
        allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
        allowed = (allowed / 100) * hotadd_percent;
        if (allocated + mem > allowed) {
                unsigned long range;
                /* Give them at least part of their hotadd memory, up to
                   hotadd_percent. It would be better to spread the limit out
                   over multiple hotplug areas, but that is too complicated
                   right now. */
                if (allocated >= allowed)
                        return 0;
                range = allowed - allocated;
                pages = (range / PAGE_SIZE);
                mem = pages * sizeof(struct page);
                nd->end = nd->start + range;
        }
        /* Not completely foolproof, but a good sanity check */
        addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
        if (addr == -1UL)
                return 0;
        if (pages != oldpages)
                printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
                        pages << PAGE_SHIFT);
        last_area_end = addr + mem;
        allocated += mem;
        return 1;
}

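/* Note that a hot-add area was found and grow end_pfn to cover it. */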
static int update_end_of_memory(unsigned long end)
{
        found_add_area = 1;
        if ((end >> PAGE_SHIFT) > end_pfn)
                end_pfn = end >> PAGE_SHIFT;
        return 1;
}

static inline int save_add_info(void)
{
        return hotadd_percent > 0;
}
#else
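/* Fallbacks when CONFIG_MEMORY_HOTPLUG_RESERVE is not set. */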
int update_end_of_memory(unsigned long end) {return -1;}
static int hotadd_enough_memory(struct bootnode *nd) {return 1;}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static inline int save_add_info(void) {return 1;}
#else
static inline int save_add_info(void) {return 0;}
#endif
#endif
/*
 * Update nodes_add and decide whether to include the add area in the zone.
 * Both SPARSE and RESERVE need the nodes_add information.
 * This code supports one contiguous hot add area per node.
 */
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
        unsigned long s_pfn = start >> PAGE_SHIFT;
        unsigned long e_pfn = end >> PAGE_SHIFT;
        int ret = 0, changed = 0;
        struct bootnode *nd = &nodes_add[node];

        /* I had some trouble with strange memory hotadd regions breaking
           the boot. Be very strict here and reject anything unexpected.
           If you want working memory hotadd, write correct SRATs.

           The node size check is a basic sanity check to guard against
           mistakes. */
        if ((signed long)(end - start) < NODE_MIN_SIZE) {
                printk(KERN_ERR "SRAT: Hotplug area too small\n");
                return -1;
        }

        /* This check might be a bit too strict, but I'm keeping it for now. */
        if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
                printk(KERN_ERR
                        "SRAT: Hotplug area %lu -> %lu has existing memory\n",
                        s_pfn, e_pfn);
                return -1;
        }

        if (!hotadd_enough_memory(&nodes_add[node])) {
                printk(KERN_ERR "SRAT: Hotplug area too large\n");
                return -1;
        }

        /* Looks good */

        if (nd->start == nd->end) {
                nd->start = start;
                nd->end = end;
                changed = 1;
        } else {
                if (nd->start == end) {
                        nd->start = start;
                        changed = 1;
                }
                if (nd->end == start) {
                        nd->end = end;
                        changed = 1;
                }
                if (!changed)
                        printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
        }

        ret = update_end_of_memory(nd->end);

        if (changed)
                printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
        return ret;
}

/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
        struct bootnode *nd, oldnode;
        unsigned long start, end;
        int node, pxm;
        int i;

        if (srat_disabled())
                return;
        if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
                bad_srat();
                return;
        }
        if (ma->flags.enabled == 0)
                return;
        if (ma->flags.hot_pluggable && !save_add_info())
                return;
        start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
        end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
        pxm = ma->proximity_domain;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
                bad_srat();
                return;
        }
        i = conflicting_nodes(start, end);
        if (i == node) {
                printk(KERN_WARNING
                "SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
                        pxm, start, end, nodes[i].start, nodes[i].end);
        } else if (i >= 0) {
                printk(KERN_ERR
                       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
                       pxm, start, end, node_to_pxm(i),
                       nodes[i].start, nodes[i].end);
                bad_srat();
                return;
        }
        nd = &nodes[node];
        oldnode = *nd;
        if (!node_test_and_set(node, nodes_parsed)) {
                nd->start = start;
                nd->end = end;
        } else {
                if (start < nd->start)
                        nd->start = start;
                if (nd->end < end)
                        nd->end = end;
        }

        printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
               nd->start, nd->end);
        e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
                                     nd->end >> PAGE_SHIFT);
        push_node_boundaries(node, nd->start >> PAGE_SHIFT,
                             nd->end >> PAGE_SHIFT);

        if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
                /* Ignore hotadd region. Undo damage */
                printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
                *nd = oldnode;
                if ((nd->start | nd->end) == 0)
                        node_clear(node, nodes_parsed);
        }
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int nodes_cover_memory(void)
{
        int i;
        unsigned long pxmram, e820ram;

        pxmram = 0;
        for_each_node_mask(i, nodes_parsed) {
                unsigned long s = nodes[i].start >> PAGE_SHIFT;
                unsigned long e = nodes[i].end >> PAGE_SHIFT;
                pxmram += e - s;
                pxmram -= absent_pages_in_range(s, e);
                if ((long)pxmram < 0)
                        pxmram = 0;
        }

        e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
        /* We seem to lose 3 pages somewhere. Allow a bit of slack. */
        if ((long)(e820ram - pxmram) >= 1*1024*1024) {
                printk(KERN_ERR
        "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
                        (pxmram << PAGE_SHIFT) >> 20,
                        (e820ram << PAGE_SHIFT) >> 20);
                return 0;
        }
        return 1;
}

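/* Forget a node: clear its parsed bit and drop any APIC id mappings to it. */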
static void unparse_node(int node)
{
        int i;
        node_clear(node, nodes_parsed);
        for (i = 0; i < MAX_LOCAL_APIC; i++) {
                if (apicid_to_node[i] == node)
                        apicid_to_node[i] = NUMA_NO_NODE;
        }
}

void __init acpi_numa_arch_fixup(void) {}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
        int i;

        /* First clean up the node list */
        for (i = 0; i < MAX_NUMNODES; i++) {
                cutoff_node(i, start, end);
                if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
                        unparse_node(i);
                        node_set_offline(i);
                }
        }

        if (acpi_numa <= 0)
                return -1;

        if (!nodes_cover_memory()) {
                bad_srat();
                return -1;
        }

        memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
        if (memnode_shift < 0) {
                printk(KERN_ERR
                     "SRAT: No NUMA node hash function found. Contact maintainer\n");
                bad_srat();
                return -1;
        }

        /* Finally register nodes */
        for_each_node_mask(i, nodes_parsed)
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        /* Try again in case setup_node_bootmem missed one due
           to missing bootmem */
        for_each_node_mask(i, nodes_parsed)
                if (!node_online(i))
                        setup_node_bootmem(i, nodes[i].start, nodes[i].end);

        for (i = 0; i < NR_CPUS; i++) {
                if (cpu_to_node[i] == NUMA_NO_NODE)
                        continue;
                if (!node_isset(cpu_to_node[i], nodes_parsed))
                        numa_set_node(i, NUMA_NO_NODE);
        }
        numa_init_array();
        return 0;
}

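/*
 * Reserve a node's hot-add memory window in bootmem so it is not handed
 * out at boot; the mem_map covering it is the pre-allocated cost reported
 * below.
 */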
void __init srat_reserve_add_area(int nodeid)
{
        if (found_add_area && nodes_add[nodeid].end) {
                u64 total_mb;

                printk(KERN_INFO "SRAT: Reserving hot-add memory space "
                                "for node %d at %Lx-%Lx\n",
                        nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
                total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
                                >> PAGE_SHIFT;
                total_mb *= sizeof(struct page);
                total_mb >>= 20;
                printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
                                "pre-allocated memory.\n", (unsigned long long)total_mb);
                reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
                               nodes_add[nodeid].end - nodes_add[nodeid].start);
        }
}

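/*
 * Return the SLIT distance between two nodes; without a valid SLIT,
 * fall back to 10 (local) and 20 (remote).
 */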
int __node_distance(int a, int b)
{
        int index;

        if (!acpi_slit)
                return a == b ? 10 : 20;
        index = acpi_slit->localities * node_to_pxm(a);
        return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);

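/* Find the node whose hot-add window contains the given physical address;
   defaults to node 0 when none matches. */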
int memory_add_physaddr_to_nid(u64 start)
{
        int i, ret = 0;

        for_each_node(i)
                if (nodes_add[i].start <= start && nodes_add[i].end > start)
                        ret = i;

        return ret;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);