/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>

#ifndef node_has_online_mem
#define node_has_online_mem(nid) (1)
#endif

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))

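/*
 * Illustration only (not part of the original header): a minimal sketch of
 * how a caller might walk the nodes that actually have CPUs.  The loop body
 * and the message text are hypothetical.
 *
 *	int node;
 *
 *	for_each_node_with_cpus(node)
 *		pr_info("node %d has %u CPU(s)\n", node, nr_cpus_node(node));
 */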
/*
 * Called by the scheduler before rebuilding sched_domains so that the
 * architecture can refresh its CPU topology data; returns non-zero if the
 * topology changed (the default weak implementation returns 0).
 */
int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
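/*
 * Sketch of the SLIT convention (not from the original header): distances
 * are relative to LOCAL_DISTANCE, so with the generic fallback above
 *
 *	node_distance(n, n) == LOCAL_DISTANCE	(== 10)
 *	node_distance(n, m) == REMOTE_DISTANCE	(== 20, for n != m)
 *
 * Architectures that override node_distance() typically feed back the
 * firmware's SLIT values instead, e.g. ~30-40 for a two-hop node
 * (illustrative numbers only).
 */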
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch-specific measurement units node_distance() returns),
 * then switch on zone reclaim at boot.
 */
#define RECLAIM_DISTANCE 20
#endif
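/*
 * Sketch only (not from the original header): the page allocator's boot-time
 * code compares inter-node distances against RECLAIM_DISTANCE when deciding
 * whether to enable zone reclaim by default, conceptually along the lines of
 *
 *	if (node_distance(local_node, node) > RECLAIM_DISTANCE)
 *		zone_reclaim_mode = 1;
 *
 * See mm/page_alloc.c for the actual logic; the snippet above is a
 * simplification, not a copy of it.
 */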
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif

/*
 * Below are the 3 major initializers used in building sched_domains:
 * SD_SIBLING_INIT, for SMT domains
 * SD_CPU_INIT, for SMP domains
 * SD_NODE_INIT, for NUMA domains
 *
 * Any architecture that cares to do any tuning to these values should do so
 * by defining its own arch-specific initializer in include/asm/topology.h.
 * A definition there will automagically override these default initializers
 * and allow arch-specific performance tuning of sched_domains.
 * (Only non-zero and non-null fields need be specified.)
 */

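/*
 * Illustration only (not part of the original header): an architecture that
 * wants different tuning defines its own initializer in its asm/topology.h,
 * which is included above before these defaults are tested.  A hypothetical
 * NUMA arch might provide something like:
 *
 *	#define SD_NODE_INIT (struct sched_domain) {		\
 *		.min_interval		= 8,			\
 *		.max_interval		= 32,			\
 *		.busy_factor		= 32,			\
 *		.imbalance_pct		= 125,			\
 *		.flags			= SD_LOAD_BALANCE	\
 *					| SD_BALANCE_EXEC	\
 *					| SD_BALANCE_FORK	\
 *					| SD_SERIALIZE,		\
 *		.last_balance		= jiffies,		\
 *		.balance_interval	= 64,			\
 *	}
 *
 * The values above are made up for the example; the real ones live in each
 * architecture's include/asm/topology.h.
 */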
#ifdef CONFIG_SCHED_SMT
/* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
 * so can't we drop this in favor of CONFIG_SCHED_SMT?
 */
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
#define SD_SIBLING_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 2,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 110,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 1*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				| arch_sd_sibling_asym_packing()	\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.smt_gain		= 1178,	/* 15% */			\
}
#endif
#endif /* CONFIG_SCHED_SMT */

#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings; for now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
#define SD_MC_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| sd_balance_for_mc_power()		\
				| sd_power_saving_flags()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif
#endif /* CONFIG_SCHED_MC */

/* Common values for CPUs */
#ifndef SD_CPU_INIT
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| sd_balance_for_package_power()	\
				| sd_power_saving_flags()		\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
#endif

/* sched_domains SD_ALLNODES_INIT for NUMA machines */
#define SD_ALLNODES_INIT (struct sched_domain) {			\
	.min_interval		= 64,					\
	.max_interval		= 64*num_online_cpus(),			\
	.busy_factor		= 128,					\
	.imbalance_pct		= 133,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 3,					\
	.idle_idx		= 3,					\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 0*SD_BALANCE_EXEC			\
				| 0*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 64,					\
}

#ifdef CONFIG_NUMA
#ifndef SD_NODE_INIT
#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
#endif

#endif /* CONFIG_NUMA */

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return __this_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
	percpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */

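/*
 * Illustration only (not part of the original header): an architecture that
 * selects CONFIG_USE_PERCPU_NUMA_NODE_ID is expected to keep the per-cpu
 * value current from its CPU bringup / hotplug path.  Conceptually (the
 * helper name below is hypothetical):
 *
 *	static void example_set_cpu_node(int cpu, int node)
 *	{
 *		set_cpu_numa_node(cpu, node);
 *		if (cpu == smp_processor_id())
 *			set_numa_node(node);
 *	}
 *
 * After that, numa_node_id() and cpu_to_node() above are cheap per-cpu reads.
 */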
#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
	percpu_write(_numa_mem_, node);
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return __this_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */

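/*
 * Illustration only (not part of the original header): allocation paths that
 * must never target a memoryless node use numa_mem_id()/cpu_to_mem() rather
 * than numa_node_id()/cpu_to_node().  A sketch of a per-CPU buffer
 * allocation, with 'size' and 'cpu' assumed to be defined by the caller:
 *
 *	void *buf = kmalloc_node(size, GFP_KERNEL, cpu_to_mem(cpu));
 *
 * When CONFIG_HAVE_MEMORYLESS_NODES is not set the two accessor families are
 * identical, so the call above degrades to plain node-local allocation.
 */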
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif

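/*
 * Note (not part of the original header): the topology_*() fallbacks above
 * are what drivers/base/topology.c exposes through sysfs when an architecture
 * does not supply its own definitions in asm/topology.h.  A hedged example of
 * a consumer, assuming 'cpu' and 'other' are valid CPU numbers:
 *
 *	if (cpumask_test_cpu(other, topology_thread_cpumask(cpu)))
 *		;	// 'other' is an SMT sibling of 'cpu'
 */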
#endif /* _LINUX_TOPOLOGY_H */