/* Common code for 32 and 64-bit NUMA */
#include <linux/topology.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/numa.h>
#include <asm/acpi.h>

int __initdata numa_off;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
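
/*
 * Command-line usage implied by the parsing above (illustrative; the
 * exact semantics of the fake= string are up to numa_emu_cmdline()):
 *
 *	numa=off	disable NUMA handling (sets numa_off = 1)
 *	numa=fake=<arg>	forward <arg> to numa_emu_cmdline()
 *			(CONFIG_NUMA_EMU only)
 *	numa=noacpi	disable ACPI-provided NUMA info (acpi_numa = -1)
 *			(CONFIG_ACPI_NUMA only)
 */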

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
#ifdef CONFIG_X86_32
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, 0);
#else
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
#endif
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
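
/*
 * Illustrative usage, not code from this file: CPU bringup code would
 * typically establish the mapping once a CPU's node is known and drop
 * it again when the CPU goes away:
 *
 *	numa_set_node(cpu, node);
 *	...
 *	numa_clear_node(cpu);
 *
 * Before the per-CPU areas exist, numa_set_node() transparently writes
 * to the early map instead, so callers need not care which boot phase
 * they run in.
 */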

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
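
/*
 * Illustrative only: once setup_node_to_cpumask_map() has run, a
 * caller may iterate the CPUs of a node; nid and do_something() are
 * placeholders, not symbols defined in this file:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		do_something(cpu);
 */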

#ifdef CONFIG_DEBUG_PER_CPU_MAPS

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
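
/*
 * Illustrative only: early boot code that needs a CPU's node before
 * the per-CPU areas exist should call early_cpu_to_node() rather than
 * cpu_to_node() and be prepared for NUMA_NO_NODE:
 *
 *	int nid = early_cpu_to_node(cpu);
 *
 *	if (nid == NUMA_NO_NODE)
 *		nid = 0;
 *
 * Falling back to node 0 here is caller policy, not something this
 * file mandates.
 */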
133
Rusty Russell71ee73e2009-03-13 14:49:52 +1030134/*
135 * Returns a pointer to the bitmask of CPUs on Node 'node'.
136 */
Rusty Russell73e907d2009-03-13 14:49:57 +1030137const struct cpumask *cpumask_of_node(int node)
Rusty Russell71ee73e2009-03-13 14:49:52 +1030138{
Rusty Russell71ee73e2009-03-13 14:49:52 +1030139 if (node >= nr_node_ids) {
140 printk(KERN_WARNING
141 "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
142 node, nr_node_ids);
143 dump_stack();
144 return cpu_none_mask;
145 }
Rusty Russellc032ef602009-03-13 14:49:53 +1030146 if (node_to_cpumask_map[node] == NULL) {
147 printk(KERN_WARNING
148 "cpumask_of_node(%d): no node_to_cpumask_map!\n",
149 node);
150 dump_stack();
151 return cpu_online_mask;
152 }
Rusty Russell0b966252009-03-13 23:42:42 +1030153 return node_to_cpumask_map[node];
Rusty Russell71ee73e2009-03-13 14:49:52 +1030154}
155EXPORT_SYMBOL(cpumask_of_node);
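
/*
 * Note on the fallbacks above: an out-of-range node yields
 * cpu_none_mask, so iterating the result visits no CPUs, while a node
 * whose mask was never allocated yields cpu_online_mask; either way a
 * buggy caller gets a usable mask after the warning instead of
 * dereferencing NULL.
 */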

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */