/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes.
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

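/*
 * Create fake NUMA node boundaries from the "numa=fake=" command line
 * option (parsed into 'cmdline' by early_numa() below). For example,
 * booting with "numa=fake=1G,4G" places memory below 1G in fake node 0,
 * memory from 1G up to 4G in fake node 1, and the remainder in the
 * next fake node.
 */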
static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id iff we started creating NUMA nodes.
	 * We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools).
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

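/*
 * For example, with form 1 affinity and distance_ref_points_depth == 4,
 * two nodes whose reference points first match at the second level are
 * LOCAL_DISTANCE * 2 = 20 apart, while nodes that differ at every level
 * are 10 * 2^4 = 160 apart. Without form 1 affinity only LOCAL_DISTANCE
 * and REMOTE_DISTANCE are reported.
 */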
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);

static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
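/*
 * For example, with min_common_depth == 4 an "ibm,associativity"
 * property of <4 0 0 1 2> has a leading cell count of 4, so the node id
 * is read from associativity[4], giving nid 2.
 */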
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
	    of_read_number(associativity, 1) >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

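/*
 * For example, read_n_cells(2, &buf) combines the two 32-bit cells
 * <0x1 0x0> into the 64-bit value 0x100000000: cells are accumulated
 * most significant first.
 */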
static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N of memblock
 * list entries, followed by the N entries themselves. Each entry contains
 * information as laid out in the of_drconf_cell struct above.
 */
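/*
 * Size check example: with n_mem_addr_cells == 2 each entry occupies
 * two address cells plus four cells for drc_index, reserved, aa_index
 * and flags, so a property holding N entries must be at least
 * (N * 6 + 1) * sizeof(u32) bytes long, the extra cell being the
 * entry count itself.
 */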
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
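/*
 * Each lookup array is array_sz cells long, so the read below fetches
 * cell (aa_index * array_sz + min_common_depth - 1) of the flattened
 * ibm,associativity-lookup-arrays list. The "- 1" reflects that these
 * arrays, unlike a full ibm,associativity property, do not carry a
 * leading length cell.
 */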
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE, nid;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		nid = numa_setup_cpu(lcpu);
		verify_cpu_node_mapping((int)lcpu, nid);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in the
 * linux,drconf-usable-memory property.
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in linux,drconf-usable-memory contains a counter
	 * followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node. This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
				&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz,
						  &memblock.memory, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties. If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
		node_set_online(nid);
	}
}

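/*
 * Prints, at KERN_DEBUG level, output of the form:
 *
 *   Node 0 CPUs: 0-7
 *   Node 1 CPUs: 8-15
 *
 * Contiguous ranges are collapsed with a '-' by the loop below.
 */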
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	if (spanned_pages)
		pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
			nid, start_pfn << PAGE_SHIFT,
			(end_pfn << PAGE_SHIFT) - 1);
	else
		pr_info("Initmem setup node %d\n", nid);

	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
	int nid, cpu;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	memblock_dump_all();

	/*
	 * Reduce the possible NUMA nodes to the online NUMA nodes,
	 * since we do not support node hotplug. This ensures that we
	 * lower the maximum NUMA node ID to what is actually present.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();
	register_cpu_notifier(&ppc64_numa_nb);
	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 */
	for_each_present_cpu(cpu) {
		numa_setup_cpu((unsigned long)cpu);
	}
}

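/*
 * Parse the "numa=" early parameter. As handled below, "numa=off"
 * disables NUMA, "numa=debug" enables the dbg() messages above, and
 * "numa=fake=<size>[,<size>...]" sets the fake node boundaries used by
 * fake_numa_create_new_node().
 */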
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "off")) {
		pr_info("Disabling topology updates\n");
		topology_updates_enabled = false;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section. Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const __be32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

1204 * Store the current values of the associativity change counters in the
1205 * hypervisor.
1206 */
1207static void setup_cpu_associativity_change_counters(void)
1208{
Jesse Larrewcd9d6cc2011-01-20 19:01:35 +00001209 int cpu;
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001210
Anton Blanchard5de16692011-01-29 12:24:34 +00001211 /* The VPHN feature supports a maximum of 8 reference points */
1212 BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
1213
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001214 for_each_possible_cpu(cpu) {
Jesse Larrewcd9d6cc2011-01-20 19:01:35 +00001215 int i;
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001216 u8 *counts = vphn_cpu_change_counts[cpu];
1217 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1218
Anton Blanchard5de16692011-01-29 12:24:34 +00001219 for (i = 0; i < distance_ref_points_depth; i++)
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001220 counts[i] = hypervisor_counts[i];
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001221 }
1222}
1223
1224/*
1225 * The hypervisor maintains a set of 8 associativity change counters in
1226 * the VPA of each cpu that correspond to the associativity levels in the
1227 * ibm,associativity-reference-points property. When an associativity
1228 * level changes, the corresponding counter is incremented.
1229 *
1230 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1231 * node associativity levels have changed.
1232 *
1233 * Returns the number of cpus with unhandled associativity changes.
1234 */
1235static int update_cpu_associativity_changes_mask(void)
1236{
Jesse Larrew5d88aa82013-04-24 06:00:35 +00001237 int cpu;
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001238 cpumask_t *changes = &cpu_associativity_changes_mask;
1239
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001240 for_each_possible_cpu(cpu) {
1241 int i, changed = 0;
1242 u8 *counts = vphn_cpu_change_counts[cpu];
1243 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1244
Anton Blanchard5de16692011-01-29 12:24:34 +00001245 for (i = 0; i < distance_ref_points_depth; i++) {
Anton Blanchardd69043e2011-01-29 12:26:19 +00001246 if (hypervisor_counts[i] != counts[i]) {
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001247 counts[i] = hypervisor_counts[i];
1248 changed = 1;
1249 }
1250 }
1251 if (changed) {
Robert Jennings3be7db62013-07-24 20:13:21 -05001252 cpumask_or(changes, changes, cpu_sibling_mask(cpu));
1253 cpu = cpu_last_thread_sibling(cpu);
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001254 }
1255 }
1256
Jesse Larrew5d88aa82013-04-24 06:00:35 +00001257 return cpumask_weight(changes);
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001258}
1259
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001260/*
1261 * Retrieve the new associativity information for a virtual processor's
1262 * home node.
1263 */
Alistair Poppleb08a2a12013-08-07 02:01:44 +10001264static long hcall_vphn(unsigned long cpu, __be32 *associativity)
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001265{
Jesse Larrewcd9d6cc2011-01-20 19:01:35 +00001266 long rc;
Jesse Larrew9eff1a32010-12-01 12:31:15 +00001267 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1268 u64 flags = 1;
1269 int hwcpu = get_hard_smp_processor_id(cpu);
1270
1271 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1272 vphn_unpack_associativity(retbuf, associativity);
1273
1274 return rc;
1275}
1276
static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;

		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

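/*
 * Refresh the numa-cpu lookup table for every hardware thread of each
 * updated core. Like update_cpu_topology(), this is meant to run under
 * stop_machine() and performs no locking of its own.
 */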
static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++)
			update_numa_cpu_lookup_table(base + j, nid);
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled)
		return 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * sizeof(*updates), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If this CPU's siblings aren't all flagged for a change,
		 * the updates list would come up short. Skip the core on
		 * this pass and flag all of its siblings so it is handled
		 * on the next update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
			if (i < weight)
				ud->next = &updates[i];
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d to %d\n",
				 ud->cpu, ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates
	 * list is too short or because the new topology is the same as
	 * the old one), skip invoking update_cpu_topology() via
	 * stop_machine(). This is necessary (and not just a fast-path
	 * optimization) since stop_machine can end up electing a random
	 * CPU to run update_cpu_topology(), and thus trick us into setting
	 * up incorrect cpu-node mappings (since 'updates' is kzalloc()'ed).
	 *
	 * For the same reason, we also skip all of the updating below.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	stop_machine(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	return changed;
}

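/*
 * rebuild_sched_domains() can sleep, so the rebuild is deferred to
 * workqueue context rather than being run directly from the timer or
 * device-tree notifier callbacks.
 */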
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

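/*
 * Timer callback. PRRN events have already flagged CPUs in the change
 * mask via the device-tree notifier, so any pending bit triggers an
 * update. For VPHN, poll the hypervisor's change counters and re-arm
 * the timer to keep polling.
 */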
static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

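/* Re-arm the polling timer for another 60 second interval. */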
static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

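/*
 * Flag every thread of the updated core for a topology update and kick
 * the timer so the change is acted on promptly.
 */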
static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

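/*
 * PRRN device-tree notifier: an update to the ibm,associativity
 * property of a cpu node indicates that firmware has reassigned the
 * core, so stage a topology update for it.
 */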
static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;

			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start watching for associativity changes: device-tree notifications
 * for PRRN, or timer-driven polling of the VPHN change counters.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Stop watching for associativity changes: unregister the PRRN
 * notifier, or cancel the VPHN polling timer.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

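/*
 * /proc/powerpc/topology_updates: reading reports whether topology
 * updates are enabled ("on" or "off"); writing "on" or "off" starts or
 * stops them.
 */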
static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release,
};

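/*
 * Example usage from userspace (assuming procfs is mounted at /proc):
 *
 *	cat /proc/powerpc/topology_updates	# prints "on" or "off"
 *	echo off > /proc/powerpc/topology_updates
 */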
static int topology_update_init(void)
{
	/* Do not poll for changes if disabled at boot */
	if (topology_updates_enabled)
		start_topology_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */