/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>

#ifndef node_has_online_mem
#define node_has_online_mem(nid) (1)
#endif

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))
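
/*
 * Illustrative only (not part of the original header): walk every online
 * node that has at least one CPU, e.g.:
 *
 *	int node;
 *
 *	for_each_node_with_cpus(node)
 *		pr_info("node %d has %u CPUs\n", node, nr_cpus_node(node));
 */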

int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch specific measurement units returned by node_distance())
 * then switch on zone reclaim on boot.
 */
#define RECLAIM_DISTANCE 30
#endif
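/*
 * Illustrative only: with the default node_distance() above,
 * node_distance(0, 0) == LOCAL_DISTANCE (10) and
 * node_distance(0, 1) == REMOTE_DISTANCE (20), so the default
 * RECLAIM_DISTANCE of 30 is never exceeded; only architectures that
 * override node_distance() with real SLIT data can report distances
 * above 30 and thereby enable zone reclaim at boot.
 */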
#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif

/*
 * Below are the 3 major initializers used in building sched_domains:
 * SD_SIBLING_INIT, for SMT domains
 * SD_MC_INIT, for multi-core domains
 * SD_CPU_INIT, for SMP domains
 *
 * Any architecture that cares to do any tuning to these values should do so
 * by defining their own arch-specific initializer in include/asm/topology.h.
 * A definition there will automagically override these default initializers
 * and allow arch-specific performance tuning of sched_domains.
 * (Only non-zero and non-null fields need be specified; see the illustrative
 * override sketch below.)
 */

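/*
 * Illustrative only (not from the original header): an architecture that
 * wants different CPU-level tuning could put something like the following
 * in its own asm/topology.h; any field it leaves out is implicitly zero:
 *
 *	#define SD_CPU_INIT (struct sched_domain) {	\
 *		.min_interval		= 1,		\
 *		.max_interval		= 8,		\
 *		.busy_factor		= 32,		\
 *		.imbalance_pct		= 125,		\
 *		.last_balance		= jiffies,	\
 *		.balance_interval	= 1,		\
 *	}
 */
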
#ifdef CONFIG_SCHED_SMT
/* MCD - Do we really need this?  It is always on if CONFIG_SCHED_SMT is,
 * so can't we drop this in favor of CONFIG_SCHED_SMT?
 */
#define ARCH_HAS_SCHED_WAKE_IDLE
/* Common values for SMT siblings */
#ifndef SD_SIBLING_INIT
#define SD_SIBLING_INIT (struct sched_domain) {			\
	.min_interval		= 1,					\
	.max_interval		= 2,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 110,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 1*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				| arch_sd_sibling_asym_packing()	\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.smt_gain		= 1178,	/* 15% */			\
	.max_newidle_lb_cost	= 0,					\
	.next_decay_max_lb_cost	= jiffies,				\
}
#endif
#endif /* CONFIG_SCHED_SMT */

#ifdef CONFIG_SCHED_MC
/* Common values for MC siblings. For now mostly derived from SD_CPU_INIT */
#ifndef SD_MC_INIT
#define SD_MC_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 1*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.max_newidle_lb_cost	= 0,					\
	.next_decay_max_lb_cost	= jiffies,				\
}
#endif
#endif /* CONFIG_SCHED_MC */

/* Common values for CPUs */
#ifndef SD_CPU_INIT
#define SD_CPU_INIT (struct sched_domain) {				\
	.min_interval		= 1,					\
	.max_interval		= 4,					\
	.busy_factor		= 64,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 2,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 1*SD_WAKE_AFFINE			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 0*SD_SERIALIZE			\
				| 1*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
	.max_newidle_lb_cost	= 0,					\
	.next_decay_max_lb_cost	= jiffies,				\
}
#endif

#ifdef CONFIG_SCHED_BOOK
#ifndef SD_BOOK_INIT
#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
#endif
#endif /* CONFIG_SCHED_BOOK */

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return raw_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
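
/*
 * Illustrative only: callers are expected to use these accessors rather
 * than the per-cpu variable itself, e.g. to allocate a page on the
 * current CPU's node:
 *
 *	struct page *page = alloc_pages_node(numa_node_id(), GFP_KERNEL, 0);
 */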

#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
	this_cpu_write(_numa_mem_, node);
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return raw_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */
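
/*
 * Illustrative only: on configurations with memoryless nodes, code that
 * wants the nearest node that actually has memory would typically use
 * numa_mem_id() instead of numa_node_id(), e.g.:
 *
 *	struct page *page = alloc_pages_node(numa_mem_id(), GFP_KERNEL, 0);
 *
 * Without CONFIG_HAVE_MEMORYLESS_NODES the two are equivalent, as the
 * fallback definitions above show.
 */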

#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif
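
/*
 * Illustrative only: generic code uses these topology_* accessors to walk
 * a CPU's siblings; with the fallback definitions above the mask contains
 * only the CPU itself, e.g.:
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, topology_thread_cpumask(cpu))
 *		pr_info("CPU%d is a thread sibling of CPU%d\n", sibling, cpu);
 */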

#endif /* _LINUX_TOPOLOGY_H */