/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <asm/sparsemem.h>
#include <asm/lmb.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
                cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                cpu_clear(cpu, numa_cpumask_lookup_table[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU */

static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
        unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
        struct device_node *cpu_node = NULL;
        const unsigned int *interrupt_server, *reg;
        int len;

        while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
                /* Try interrupt server first */
                interrupt_server = of_get_property(cpu_node,
                                        "ibm,ppc-interrupt-server#s", &len);

                len = len / sizeof(u32);

                if (interrupt_server && (len > 0)) {
                        while (len--) {
                                if (interrupt_server[len] == hw_cpuid)
                                        return cpu_node;
                        }
                } else {
                        reg = of_get_property(cpu_node, "reg", &len);
                        if (reg && (len > 0) && (reg[0] == hw_cpuid))
                                return cpu_node;
                }
        }

        return NULL;
}

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const unsigned int *tmp;

        if (min_common_depth == -1)
                goto out;

        tmp = of_get_associativity(device);
        if (!tmp)
                goto out;

        if (tmp[0] >= min_common_depth)
                nid = tmp[min_common_depth];

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;
out:
        return nid;
}
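
/*
 * Worked example for of_node_to_nid_single() above, with hypothetical
 * property values: if min_common_depth is 4 and a device carries
 * ibm,associativity = <5 0 0 0 2 6>, then tmp[0] = 5 >= 4 and the
 * node id is tmp[4] = 2.
 */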

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine. This resource then has different associativity
 * characteristics relative to its multiple connections. We ignore
 * this for now. We also assume that all cpu and memory sets have
 * their distances represented at a common level. This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
        int depth;
        const unsigned int *ref_points;
        struct device_node *rtas_root;
        unsigned int len;

        rtas_root = of_find_node_by_path("/rtas");

        if (!rtas_root)
                return -1;

        /*
         * this property is 2 32-bit integers, each representing a level of
         * depth in the associativity nodes. The first is for an SMP
         * configuration (should be all 0's) and the second is for a normal
         * NUMA configuration.
         */
        ref_points = of_get_property(rtas_root,
                        "ibm,associativity-reference-points", &len);
        if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
                depth = ref_points[1];
        } else {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                depth = -1;
        }
        of_node_put(rtas_root);

        return depth;
}
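
/*
 * Worked example for find_min_common_depth() above, with hypothetical
 * property contents: ibm,associativity-reference-points = <0x2 0x4>
 * yields depth = ref_points[1] = 4, i.e. the fourth associativity
 * level is the one that distinguishes NUMA nodes.
 */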

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}
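
/*
 * Example: with n = 2 and *buf pointing at the cells { 0x1, 0x0 },
 * read_n_cells() returns 0x100000000 and leaves *buf advanced past
 * both cells.
 */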

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = find_cpu_node(lcpu);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = any_online_node(NODE_MASK_ALL);
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
                                       unsigned long action,
                                       void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use lmb_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.
         */

        if (!memory_limit)
                return size;

        if (start + size <= lmb_end_of_DRAM())
                return size;

        if (start >= lmb_end_of_DRAM())
                return 0;

        return lmb_end_of_DRAM() - start;
}
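
/*
 * Example, with hypothetical addresses: if a memory limit leaves
 * lmb_end_of_DRAM() at 0x40000000, a region at 0x30000000 of size
 * 0x20000000 is truncated to 0x10000000, and a region starting at or
 * above 0x40000000 is discarded entirely (size 0).
 */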

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set. Each entry
 * in the ibm,dynamic-memory property describes one LMB: its base
 * address (n_mem_addr_cells cells) followed by four cells holding the
 * drc-index, a reserved word, the associativity-lookup-array index and
 * the flags word.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const unsigned int *lm, *dm, *aa;
        unsigned int ls, ld, la;
        unsigned int n, aam, aalen;
        unsigned long lmb_size, size;
        int nid, default_nid = 0;
        unsigned long start;
        unsigned int ai, flags;

        lm = of_get_property(memory, "ibm,lmb-size", &ls);
        dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
        aa = of_get_property(memory, "ibm,associativity-lookup-arrays", &la);
        if (!lm || !dm || !aa ||
            ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
            la < 2 * sizeof(unsigned int))
                return;

        lmb_size = read_n_cells(n_mem_size_cells, &lm);
        n = *dm++;              /* number of LMBs */
        aam = *aa++;            /* number of associativity lists */
        aalen = *aa++;          /* length of each associativity list */
        if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
            la < (aam * aalen + 2) * sizeof(unsigned int))
                return;

        for (; n != 0; --n) {
                start = read_n_cells(n_mem_addr_cells, &dm);
                ai = dm[2];
                flags = dm[3];
                dm += 4;
                /* 0x80 == reserved, 0x8 = assigned to us */
                if ((flags & 0x80) || !(flags & 0x8))
                        continue;
                nid = default_nid;
                /* flags & 0x40 means associativity index is invalid */
                if (min_common_depth > 0 && min_common_depth <= aalen &&
                    (flags & 0x40) == 0 && ai < aam) {
                        /* this is like of_node_to_nid_single */
                        nid = aa[ai * aalen + min_common_depth - 1];
                        if (nid == 0xffff || nid >= MAX_NUMNODES)
                                nid = default_nid;
                }
                node_set_online(nid);

                size = numa_enforce_memory_limit(start, lmb_size);
                if (!size)
                        continue;

                add_active_range(nid, start >> PAGE_SHIFT,
                                 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
        }
}
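
/*
 * A hypothetical worked example for parse_drconf_memory() above: with
 * n_mem_addr_cells = 2, an ibm,dynamic-memory entry of
 * <0x0 0x10000000 0x8000000a 0x0 0x1 0x8> describes an LMB at
 * 0x10000000 whose associativity-lookup-array index is 1 and whose
 * flags (0x8) mark it as assigned to this partition.
 */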

static int __init parse_numa_properties(void)
{
        struct device_node *cpu = NULL;
        struct device_node *memory = NULL;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                int nid;

                cpu = find_cpu_node(i);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
        memory = NULL;
        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                                "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties. If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                add_active_range(nid, start >> PAGE_SHIFT,
                                 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each LMB listed in the ibm,dynamic-memory
         * property in the ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = lmb_end_of_DRAM();
        unsigned long total_ram = lmb_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int i;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for (i = 0; i < lmb.memory.cnt; ++i) {
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }
        node_set_online(0);
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", NR_CPUS - 1);
                printk("\n");
        }
}
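
/*
 * The loop above collapses contiguous cpus into ranges, producing
 * output along the lines of "Node 0 CPUs: 0-3" (format illustrative)
 * for a node holding cpus 0 through 3.
 */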

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < lmb_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}
/*
 * Allocate some memory, falling back from the lmb allocator to the
 * bootmem allocator where required.  nid is the preferred node and
 * end_pfn bounds the node's memory as a page frame number.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        int new_nid;
        unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret)
                ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

        if (!ret)
                panic("numa.c: cannot allocate %lu bytes on node %d",
                      size, nid);

        /*
         * If the memory came from a previously allocated node, we must
         * retry with the bootmem allocator.
         */
        new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                if (!ret)
                        panic("numa.c: cannot allocate %lu bytes on node %d",
                              size, new_nid);

                ret = __pa(ret);

                dbg("alloc_bootmem %lx %lx\n", ret, size);
        }

        return (void *)ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

void __init do_init_bootmem(void)
{
        int nid;
        unsigned int i;

        min_low_pfn = 0;
        max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                unsigned long bootmem_paddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /* Allocate the node structure node local if possible */
                NODE_DATA(nid) = careful_allocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);
                NODE_DATA(nid) = __va(NODE_DATA(nid));
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_paddr = (unsigned long)careful_allocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);
                memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

                dbg("bootmap_paddr = %lx\n", bootmem_paddr);

                init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);

                /* Mark reserved regions on this node */
                for (i = 0; i < lmb.reserved.cnt; i++) {
                        unsigned long physbase = lmb.reserved.region[i].base;
                        unsigned long size = lmb.reserved.region[i].size;
                        unsigned long start_paddr = start_pfn << PAGE_SHIFT;
                        unsigned long end_paddr = end_pfn << PAGE_SHIFT;

                        if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
                            early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
                                continue;

                        if (physbase < end_paddr &&
                            (physbase+size) > start_paddr) {
                                /* overlaps */
                                if (physbase < start_paddr) {
                                        size -= start_paddr - physbase;
                                        physbase = start_paddr;
                                }

                                if (size > end_paddr - physbase)
                                        size = end_paddr - physbase;

                                dbg("reserve_bootmem %lx %lx\n", physbase,
                                    size);
                                reserve_bootmem_node(NODE_DATA(nid), physbase,
                                                     size);
                        }
                }

                sparse_memory_present_with_active_regions(nid);
        }
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}

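/*
 * Command line handling: booting with "numa=off" disables NUMA setup
 * altogether, and "numa=debug" enables the dbg() messages above.
 */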
static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        nodemask_t nodes;
        int default_nid = any_online_node(NODE_MASK_ALL);
        int nid;

        if (!numa_enabled || (min_common_depth < 0))
                return default_nid;

        while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
                unsigned long start, size;
                int ranges;
                const unsigned int *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);
                nid = of_node_to_nid_single(memory);

                /* Domains not present at boot default to 0 */
                if (nid < 0 || !node_online(nid))
                        nid = default_nid;

                if ((scn_addr >= start) && (scn_addr < (start + size))) {
                        of_node_put(memory);
                        goto got_nid;
                }

                if (--ranges)           /* process all ranges in cell */
                        goto ha_new_range;
        }
        BUG();  /* section address should be found above */
        return 0;

        /* Temporary code to ensure that returned node is not empty */
got_nid:
        nodes_setall(nodes);
        while (NODE_DATA(nid)->node_spanned_pages == 0) {
                node_clear(nid, nodes);
                nid = any_online_node(nodes);
        }
        return nid;
}
#endif /* CONFIG_MEMORY_HOTPLUG */