#ifndef _ASM_S390_TOPOLOGY_H
#define _ASM_S390_TOPOLOGY_H

#include <linux/cpumask.h>
#include <asm/numa.h>

struct sysinfo_15_1_x;
struct cpu;

#ifdef CONFIG_SCHED_TOPOLOGY

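/*
 * Per-CPU topology information. The *_id fields give this CPU's
 * position at each nesting level of the machine (thread within core,
 * core within socket, socket within book, plus the NUMA node), and
 * the *_mask fields hold the cpumasks of all CPUs that share the
 * corresponding level with it, as exposed via the topology_*()
 * accessors below.
 */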
struct cpu_topology_s390 {
	unsigned short thread_id;
	unsigned short core_id;
	unsigned short socket_id;
	unsigned short book_id;
	unsigned short node_id;
	cpumask_t thread_mask;
	cpumask_t core_mask;
	cpumask_t book_mask;
};

DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);

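/*
 * Accessors used by the generic topology code: each maps a CPU number
 * to the corresponding field of its per-CPU topology record.
 */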
#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
#define topology_thread_id(cpu)		  (per_cpu(cpu_topology, cpu).thread_id)
#define topology_sibling_cpumask(cpu) \
		(&per_cpu(cpu_topology, cpu).thread_mask)
#define topology_core_id(cpu)		  (per_cpu(cpu_topology, cpu).core_id)
#define topology_core_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu)		  (per_cpu(cpu_topology, cpu).book_id)
#define topology_book_cpumask(cpu)	  (&per_cpu(cpu_topology, cpu).book_mask)

#define mc_capable() 1

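/* Implemented in arch/s390/kernel/topology.c. */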
int topology_cpu_init(struct cpu *);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
void store_topology(struct sysinfo_15_1_x *info);
void topology_expect_change(void);
const struct cpumask *cpu_coregroup_mask(int cpu);

#else /* CONFIG_SCHED_TOPOLOGY */

static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
static inline void topology_expect_change(void) { }

#endif /* CONFIG_SCHED_TOPOLOGY */

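/*
 * CPU polarization states: unknown, horizontal, or vertical with
 * low, medium or high entitlement.
 */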
#define POLARIZATION_UNKNOWN	(-1)
#define POLARIZATION_HRZ	(0)
#define POLARIZATION_VL		(1)
#define POLARIZATION_VM		(2)
#define POLARIZATION_VH		(3)

#define SD_BOOK_INIT	SD_CPU_INIT

#ifdef CONFIG_NUMA

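/* Map a CPU to the NUMA node recorded in its per-CPU topology data. */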
#define cpu_to_node cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(cpu_topology, cpu).node_id;
}

/* Returns a pointer to the cpumask of CPUs on node 'node'. */
#define cpumask_of_node cpumask_of_node
static inline const struct cpumask *cpumask_of_node(int node)
{
	return &node_to_cpumask_map[node];
}

/*
 * Returns the number of the node containing node 'node'. This
 * architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

#define pcibus_to_node(bus) __pcibus_to_node(bus)

#define node_distance(a, b) __node_distance(a, b)

#else /* !CONFIG_NUMA */

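/* Without CONFIG_NUMA all CPUs are on node 0. */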
#define numa_node_id numa_node_id
static inline int numa_node_id(void)
{
	return 0;
}

#endif /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#endif /* _ASM_S390_TOPOLOGY_H */