/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

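/* Set via the "numa=debug" early parameter; enables the dbg() traces below. */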
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | static int numa_debug; | 
|  | 33 | #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); } | 
|  | 34 |  | 
| Anton Blanchard | 45fb6ce | 2005-11-11 14:22:35 +1100 | [diff] [blame] | 35 | int numa_cpu_lookup_table[NR_CPUS]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 36 | cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 37 | struct pglist_data *node_data[MAX_NUMNODES]; | 
| Anton Blanchard | 45fb6ce | 2005-11-11 14:22:35 +1100 | [diff] [blame] | 38 |  | 
|  | 39 | EXPORT_SYMBOL(numa_cpu_lookup_table); | 
|  | 40 | EXPORT_SYMBOL(numa_cpumask_lookup_table); | 
|  | 41 | EXPORT_SYMBOL(node_data); | 
|  | 42 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 43 | static int min_common_depth; | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 44 | static int n_mem_addr_cells, n_mem_size_cells; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 |  | 
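/*
 * Parse the next boundary from the "numa=fake=<size>[,<size>...]" command
 * line option and start a new fake NUMA node once the memory scan crosses
 * that boundary.  Returns 1 and updates *nid when a new node is created,
 * 0 otherwise.
 */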
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify the node id only if we have started creating fake
	 * NUMA nodes.  We want to continue from where we left off
	 * the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

/*
 * get_active_region_work_fn - A helper function for get_node_active_region.
 *	Sets datax->start_pfn and datax->end_pfn to this region's bounds if
 *	the region contains the initial value of datax->start_pfn.
 * @start_pfn: start page (inclusive) of region to check
 * @end_pfn: end page (exclusive) of region to check
 * @datax: comes in with ->start_pfn set to the value to search for and
 *	goes out with the active range if that range contains it
 * Returns 1 if the search value is in range, else 0.
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;
	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;
}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
				       struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;
	return prop;
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth, index;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;
	struct device_node *options;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * This property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	index = 1;
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/*
	 * For type 1 affinity information we want the first field
	 */
	options = of_find_node_by_path("/options");
	if (options) {
		const char *str;
		str = of_get_property(options, "ibm,associativity-form", NULL);
		if (str && !strcmp(str, "1"))
			index = 0;
	}

	if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
		depth = ref_points[index];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

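/*
 * Assemble one value from @n consecutive 32-bit device-tree cells,
 * advancing the caller's buffer pointer past the cells that were read.
 */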
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N indicating
 * the number of lmb list entries, followed by N lmb list entries.  Each
 * lmb list entry contains information as laid out in the of_drconf_cell
 * struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) tuples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) tuples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
				&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

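/* Print each online node and the ranges of CPU ids mapped to it. */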
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}

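/*
 * Print each online node and the physical address ranges assigned to it,
 * scanning memory one sparsemem section at a time.
 */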
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, using the lmb or bootmem allocator as
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the LMB allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the LMB allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the LMB.  We don't free the LMB memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].base;
		unsigned long size = lmb.reserved.region[i].size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this lmb.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}

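/*
 * Set up a bootmem allocator for every online node: allocate each node's
 * pglist_data and bootmap (node-local where possible), hand the node's
 * active ranges to bootmem, and re-reserve the lmb reserved regions that
 * fall within it.
 */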
void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

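/*
 * Parse the "numa=" early parameter: "off" disables NUMA entirely,
 * "debug" enables the dbg() traces above, and "fake=<size>[,<size>...]"
 * creates artificial node boundaries at the given memory offsets.
 */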
|  | 1009 | static int __init early_numa(char *p) | 
|  | 1010 | { | 
|  | 1011 | if (!p) | 
|  | 1012 | return 0; | 
|  | 1013 |  | 
|  | 1014 | if (strstr(p, "off")) | 
|  | 1015 | numa_enabled = 0; | 
|  | 1016 |  | 
|  | 1017 | if (strstr(p, "debug")) | 
|  | 1018 | numa_debug = 1; | 
|  | 1019 |  | 
| Balbir Singh | 1daa6d0 | 2008-02-01 15:57:31 +1100 | [diff] [blame] | 1020 | p = strstr(p, "fake="); | 
|  | 1021 | if (p) | 
|  | 1022 | cmdline = p + strlen("fake="); | 
|  | 1023 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | return 0; | 
|  | 1025 | } | 
|  | 1026 | early_param("numa", early_numa); | 
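|  |  | /* | 
|  |  |  * Illustrative command-line usage (values are examples only): | 
|  |  |  * | 
|  |  |  *   numa=off          disable NUMA node discovery | 
|  |  |  *   numa=debug        enable the dbg() messages in this file | 
|  |  |  *   numa=fake=1G,4G   hand a list of memory boundaries to the fake-NUMA | 
|  |  |  *                     code (via cmdline above), which starts a new fake | 
|  |  |  *                     node each time memory crosses the next boundary | 
|  |  |  */ | 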
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1027 |  | 
|  | 1028 | #ifdef CONFIG_MEMORY_HOTPLUG | 
|  | 1029 | /* | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1030 | * Find the node associated with a hot-added memory section for | 
|  | 1031 | * memory represented in the device tree by the | 
|  | 1032 | * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory property. | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1033 | */ | 
|  | 1034 | static int hot_add_drconf_scn_to_nid(struct device_node *memory, | 
|  | 1035 | unsigned long scn_addr) | 
|  | 1036 | { | 
|  | 1037 | const u32 *dm; | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1038 | unsigned int drconf_cell_cnt, rc; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1039 | unsigned long lmb_size; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1040 | struct assoc_arrays aa; | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1041 | int nid = -1; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1042 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1043 | drconf_cell_cnt = of_get_drconf_memory(memory, &dm); | 
|  | 1044 | if (!drconf_cell_cnt) | 
|  | 1045 | return -1; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1046 |  | 
|  | 1047 | lmb_size = of_get_lmb_size(memory); | 
|  | 1048 | if (!lmb_size) | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1049 | return -1; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1050 |  | 
|  | 1051 | rc = of_get_assoc_arrays(memory, &aa); | 
|  | 1052 | if (rc) | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1053 | return -1; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1054 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1055 | for (; drconf_cell_cnt != 0; --drconf_cell_cnt) { | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1056 | struct of_drconf_cell drmem; | 
|  | 1057 |  | 
|  | 1058 | read_drconf_cell(&drmem, &dm); | 
|  | 1059 |  | 
|  | 1060 | /* skip this block if it is reserved or not assigned to | 
|  | 1061 | * this partition */ | 
|  | 1062 | if ((drmem.flags & DRCONF_MEM_RESERVED) | 
|  | 1063 | || !(drmem.flags & DRCONF_MEM_ASSIGNED)) | 
|  | 1064 | continue; | 
|  | 1065 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1066 | if ((scn_addr < drmem.base_addr) | 
|  | 1067 | || (scn_addr >= (drmem.base_addr + lmb_size))) | 
|  | 1068 | continue; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1069 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1070 | nid = of_drconf_to_nid_single(&drmem, &aa); | 
|  | 1071 | break; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1072 | } | 
|  | 1073 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1074 | return nid; | 
| Nathan Fontenot | 0db9360 | 2008-07-03 13:25:08 +1000 | [diff] [blame] | 1075 | } | 
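|  |  | /* | 
|  |  |  * For reference, a sketch of the ibm,dynamic-memory layout walked above | 
|  |  |  * (the exact encoding belongs to the PAPR binding and the of_drconf_* | 
|  |  |  * helpers, so treat this as illustrative): | 
|  |  |  * | 
|  |  |  *   cell 0:     number of LMB entries that follow | 
|  |  |  *   per entry:  base address (2 cells), drc-index, reserved, | 
|  |  |  *               associativity-array index, flags | 
|  |  |  * | 
|  |  |  * of_drconf_to_nid_single() combines the associativity-array index with | 
|  |  |  * the arrays read by of_get_assoc_arrays() to pick the node. | 
|  |  |  */ | 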
|  | 1076 |  | 
|  | 1077 | /* | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1078 | * Find the node associated with a hot-added memory section for memory | 
|  | 1079 | * represented in the device tree as a memory@XXXX node, one such | 
|  | 1080 | * node per LMB. | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1081 | */ | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1082 | int hot_add_node_scn_to_nid(unsigned long scn_addr) | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1083 | { | 
|  | 1084 | struct device_node *memory = NULL; | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1085 | int nid = -1; | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1086 |  | 
|  | 1087 | while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { | 
|  | 1088 | unsigned long start, size; | 
| Mike Kravetz | b226e46 | 2005-12-16 14:30:35 -0800 | [diff] [blame] | 1089 | int ranges; | 
| Jeremy Kerr | a7f67bd | 2006-07-12 15:35:54 +1000 | [diff] [blame] | 1090 | const unsigned int *memcell_buf; | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1091 | unsigned int len; | 
|  | 1092 |  | 
| Stephen Rothwell | e2eb639 | 2007-04-03 22:26:41 +1000 | [diff] [blame] | 1093 | memcell_buf = of_get_property(memory, "reg", &len); | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1094 | if (!memcell_buf || len <= 0) | 
|  | 1095 | continue; | 
|  | 1096 |  | 
| Benjamin Herrenschmidt | cc5d018 | 2005-12-13 18:01:21 +1100 | [diff] [blame] | 1097 | /* "reg" holds (addr, size) ranges; len is in bytes, 4 bytes per cell */ | 
|  | 1098 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1099 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1100 | while (ranges--) { | 
|  | 1101 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); | 
|  | 1102 | size = read_n_cells(n_mem_size_cells, &memcell_buf); | 
|  | 1103 |  | 
|  | 1104 | if ((scn_addr < start) || (scn_addr >= (start + size))) | 
|  | 1105 | continue; | 
|  | 1106 |  | 
|  | 1107 | nid = of_node_to_nid_single(memory); | 
|  | 1108 | break; | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1109 | } | 
|  | 1110 |  | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1111 | of_node_put(memory); | 
|  | 1112 | if (nid >= 0) | 
|  | 1113 | break; | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1114 | } | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1115 |  | 
|  | 1116 | return nid; | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1117 | } | 
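|  |  | /* | 
|  |  |  * Example of the "reg" encoding walked above (illustrative; the actual | 
|  |  |  * cell counts come from n_mem_addr_cells/n_mem_size_cells).  With two | 
|  |  |  * address cells and two size cells, a node such as | 
|  |  |  * | 
|  |  |  *   memory@10000000 { | 
|  |  |  *           reg = <0x0 0x10000000  0x0 0x10000000>; | 
|  |  |  *   }; | 
|  |  |  * | 
|  |  |  * describes one 256MB range starting at 256MB, i.e. one (start, size) | 
|  |  |  * pair per range. | 
|  |  |  */ | 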
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1118 |  | 
|  | 1119 | /* | 
|  | 1120 | * Find the node associated with a hot-added memory section.  The section | 
|  | 1121 | * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that | 
|  | 1122 | * sections are fully contained within a single LMB. | 
|  | 1123 | */ | 
|  | 1124 | int hot_add_scn_to_nid(unsigned long scn_addr) | 
|  | 1125 | { | 
|  | 1126 | struct device_node *memory = NULL; | 
|  | 1127 | int nid, found = 0; | 
|  | 1128 |  | 
|  | 1129 | if (!numa_enabled || (min_common_depth < 0)) | 
| H Hartley Sweeten | 72c3368 | 2010-03-05 13:42:43 -0800 | [diff] [blame] | 1130 | return first_online_node; | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1131 |  | 
|  | 1132 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | 
|  | 1133 | if (memory) { | 
|  | 1134 | nid = hot_add_drconf_scn_to_nid(memory, scn_addr); | 
|  | 1135 | of_node_put(memory); | 
|  | 1136 | } else { | 
|  | 1137 | nid = hot_add_node_scn_to_nid(scn_addr); | 
|  | 1138 | } | 
|  | 1139 |  | 
|  | 1140 | if (nid < 0 || !node_online(nid)) | 
| H Hartley Sweeten | 72c3368 | 2010-03-05 13:42:43 -0800 | [diff] [blame] | 1141 | nid = first_online_node; | 
| Nathan Fontenot | 0f16ef7 | 2009-02-17 08:08:30 +0000 | [diff] [blame] | 1142 |  | 
|  | 1143 | if (NODE_DATA(nid)->node_spanned_pages) | 
|  | 1144 | return nid; | 
|  | 1145 |  | 
|  | 1146 | for_each_online_node(nid) { | 
|  | 1147 | if (NODE_DATA(nid)->node_spanned_pages) { | 
|  | 1148 | found = 1; | 
|  | 1149 | break; | 
|  | 1150 | } | 
|  | 1151 | } | 
|  | 1152 |  | 
|  | 1153 | BUG_ON(!found); | 
|  | 1154 | return nid; | 
|  | 1155 | } | 
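|  |  | /* | 
|  |  |  * Rough sketch of how the hotplug path is expected to use this (the | 
|  |  |  * actual caller lives outside this file; names here are illustrative): | 
|  |  |  * | 
|  |  |  *   nid = hot_add_scn_to_nid(start); | 
|  |  |  *   pgdata = NODE_DATA(nid); | 
|  |  |  *   ... add the new section's pages to pgdata ... | 
|  |  |  */ | 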
|  | 1156 |  | 
| Mike Kravetz | 237a098 | 2005-12-05 12:06:42 -0800 | [diff] [blame] | 1157 | #endif /* CONFIG_MEMORY_HOTPLUG */ |