blob: c44df50a05ab5a81e42b1a37299fb9e790298fc3 [file] [log] [blame]
/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
27#ifndef _LINUX_TOPOLOGY_H
28#define _LINUX_TOPOLOGY_H
29
30#include <linux/cpumask.h>
31#include <linux/bitops.h>
32#include <linux/mmzone.h>
33#include <linux/smp.h>
Lee Schermerhorn72812012010-05-26 14:44:56 -070034#include <linux/percpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <asm/topology.h>
36
/* Fallbacks; architectures may override these in <asm/topology.h>. */
#ifndef node_has_online_mem
#define node_has_online_mem(nid) (1)
#endif

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

/* Iterate over every online node that has at least one CPU. */
#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))

/* Arch hook: returns non-zero if the CPU topology changed since last call. */
int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch specific measurement units returned by node_distance())
 * then switch on zone reclaim on boot.
 */
#define RECLAIM_DISTANCE 20
#endif
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif

/*
 * Below are the 3 major initializers used in building sched_domains:
 * SD_SIBLING_INIT, for SMT domains
 * SD_CPU_INIT, for SMP domains
 * SD_NODE_INIT, for NUMA domains
 *
 * Any architecture that cares to do any tuning to these values should do so
 * by defining their own arch-specific initializer in include/asm/topology.h.
 * A definition there will automagically override these default initializers
 * and allow arch-specific performance tuning of sched_domains.
 * (Only non-zero and non-null fields need be specified.)
 */

#ifdef CONFIG_SCHED_SMT
/* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
 * so can't we drop this in favor of CONFIG_SCHED_SMT?
 */
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
#define SD_SIBLING_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 2,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 110,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 1*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.smt_gain		= 1178,	/* 15% */			\
}
#endif
#endif /* CONFIG_SCHED_SMT */

#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
#define SD_MC_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| sd_balance_for_mc_power()		\
				| sd_power_saving_flags()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif
#endif /* CONFIG_SCHED_MC */

/* Common values for CPUs */
#ifndef SD_CPU_INIT
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| sd_balance_for_package_power()	\
				| sd_power_saving_flags()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif

/* sched_domains SD_ALLNODES_INIT for NUMA machines */
#define SD_ALLNODES_INIT (struct sched_domain) {			\
	.min_interval		= 64,					\
	.max_interval		= 64*num_online_cpus(),			\
	.busy_factor		= 128,					\
	.imbalance_pct		= 133,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 3,					\
	.idle_idx		= 3,					\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 0*SD_BALANCE_EXEC			\
				| 0*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 64,					\
}

#ifdef CONFIG_NUMA
/* NUMA architectures must supply their own SD_NODE_INIT tuning. */
#ifndef SD_NODE_INIT
#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
#endif

#endif /* CONFIG_NUMA */

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
/* Per-cpu cache of each CPU's NUMA node id. */
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return __this_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
/* Record the node of the *current* CPU. */
static inline void set_numa_node(int node)
{
	percpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
/* Record the node of an arbitrary CPU. */
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */

#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

#ifndef set_numa_mem
/* Record the nearest memory-bearing node for the *current* CPU. */
static inline void set_numa_mem(int node)
{
	percpu_write(_numa_mem_, node);
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return __this_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
/* Record the nearest memory-bearing node for an arbitrary CPU. */
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

/* Every node has memory: the setters are no-ops ... */
static inline void set_numa_mem(int node) {}

static inline void set_cpu_numa_mem(int cpu, int node) {}

/* ... and the "nearest node with memory" is simply the node itself. */
#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */

/* Fallback topology accessors for architectures that define none. */
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -0700328#endif /* _LINUX_TOPOLOGY_H */