jdl@freescale.com | dd56fdf | 2005-09-07 15:59:48 -0500 | [diff] [blame] | 1 | #ifndef _ASM_POWERPC_TOPOLOGY_H |
| 2 | #define _ASM_POWERPC_TOPOLOGY_H |
Arnd Bergmann | 88ced03 | 2005-12-16 22:43:46 +0100 | [diff] [blame] | 3 | #ifdef __KERNEL__ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | |
Jeremy Kerr | 953039c | 2006-05-01 12:16:12 -0700 | [diff] [blame] | 6 | struct sys_device; |
| 7 | struct device_node; |
| 8 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | #ifdef CONFIG_NUMA |
| 10 | |
/*
 * Before going off node we want the VM to try and reclaim from the local
 * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
 * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
 * 20, we never reclaim and go off node straight away.
 *
 * To fix this we choose a smaller value of RECLAIM_DISTANCE.
 *
 * NOTE: this comment + define were previously duplicated verbatim (a merge
 * artifact); only a single copy is kept here.
 */
#define RECLAIM_DISTANCE 10
| 30 | |
jdl@freescale.com | dd56fdf | 2005-09-07 15:59:48 -0500 | [diff] [blame] | 31 | #include <asm/mmzone.h> |
| 32 | |
/*
 * Map a logical CPU number to the NUMA node it belongs to.
 *
 * numa_cpu_lookup_table is defined outside this header; presumably it is
 * populated by the platform NUMA setup code before this is called — the
 * table contents cannot be verified from this header alone.
 */
static inline int cpu_to_node(int cpu)
{
	return numa_cpu_lookup_table[cpu];
}
| 37 | |
/* Nodes have no hierarchy on powerpc: every node is its own parent. */
#define parent_node(node)	(node)

/*
 * cpumask of all CPUs on @node.  A node value of -1 is treated as
 * "no specific node" and maps to the mask of all CPUs.
 */
#define cpumask_of_node(node) ((node) == -1 ?		\
			cpu_all_mask :			\
			node_to_cpumask_map[node])
Rusty Russell | 86c6f27 | 2008-12-26 22:23:39 +1030 | [diff] [blame] | 43 | |
struct pci_bus;
#ifdef CONFIG_PCI
/* Return the NUMA node a PCI bus is attached to (implemented elsewhere). */
extern int pcibus_to_node(struct pci_bus *bus);
#else
/* Without PCI there is no bus->node mapping; -1 means "no node". */
static inline int pcibus_to_node(struct pci_bus *bus)
{
	return -1;
}
#endif

/*
 * cpumask of the CPUs local to @bus; falls back to all CPUs when the
 * bus has no known node (pcibus_to_node() returned -1).
 */
#define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?		\
				 cpu_all_mask :				\
				 cpumask_of_node(pcibus_to_node(bus)))
| 57 | |
/*
 * sched_domains SD_NODE_INIT for PPC64 machines.
 *
 * Initializer for the NUMA-node-level scheduler domain.  The flags are
 * written as 1*SD_FOO / 0*SD_FOO so every available flag stays listed and
 * toggling one is a single-character change; 0* entries are disabled.
 * The specific tuning values (intervals, busy_factor, imbalance_pct, ...)
 * are scheduler heuristics — see the sched_domain documentation for their
 * meaning; they cannot be derived from this header alone.
 */
#define SD_NODE_INIT (struct sched_domain) {				\
	.min_interval		= 8,					\
	.max_interval		= 32,					\
	.busy_factor		= 32,					\
	.imbalance_pct		= 125,					\
	.cache_nice_tries	= 1,					\
	.busy_idx		= 3,					\
	.idle_idx		= 1,					\
	.newidle_idx		= 0,					\
	.wake_idx		= 0,					\
	.forkexec_idx		= 0,					\
									\
	.flags			= 1*SD_LOAD_BALANCE			\
				| 1*SD_BALANCE_NEWIDLE			\
				| 1*SD_BALANCE_EXEC			\
				| 1*SD_BALANCE_FORK			\
				| 0*SD_BALANCE_WAKE			\
				| 0*SD_WAKE_AFFINE			\
				| 0*SD_PREFER_LOCAL			\
				| 0*SD_SHARE_CPUPOWER			\
				| 0*SD_POWERSAVINGS_BALANCE		\
				| 0*SD_SHARE_PKG_RESOURCES		\
				| 1*SD_SERIALIZE			\
				| 0*SD_PREFER_SIBLING			\
				,					\
	.last_balance		= jiffies,				\
	.balance_interval	= 1,					\
}
| 87 | |
/* Inter-node distance metric; the computation lives outside this header. */
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)

/* Dump the discovered CPU/NUMA layout at boot (early-init only: __init). */
extern void __init dump_numa_cpu_topology(void);

/* Link / unlink a sysfs device under its NUMA node (implemented elsewhere). */
extern int sysfs_add_device_to_node(struct sys_device *dev, int nid);
extern void sysfs_remove_device_from_node(struct sys_device *dev, int nid);
| 95 | |
Jesse Larrew | 39bf990 | 2010-12-17 22:07:47 +0000 | [diff] [blame] | 96 | #else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 97 | |
/* !CONFIG_NUMA: nothing to dump. */
static inline void dump_numa_cpu_topology(void) {}

/* !CONFIG_NUMA: no node directories in sysfs; report success. */
static inline int sysfs_add_device_to_node(struct sys_device *dev, int nid)
{
	return 0;
}

/* !CONFIG_NUMA: nothing to remove. */
static inline void sysfs_remove_device_from_node(struct sys_device *dev,
						 int nid)
{
}
Mike Travis | aa6b544 | 2008-03-31 08:41:55 -0700 | [diff] [blame] | 109 | #endif /* CONFIG_NUMA */ |
Jeremy Kerr | 953039c | 2006-05-01 12:16:12 -0700 | [diff] [blame] | 110 | |
/*
 * Runtime topology-update hooks: only meaningful on shared-processor
 * LPAR NUMA systems; everywhere else they are no-op stubs returning 0.
 */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int start_topology_update(void);
extern int stop_topology_update(void);
#else
static inline int start_topology_update(void)
{
	return 0;
}
static inline int stop_topology_update(void)
{
	return 0;
}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
| 124 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 125 | #include <asm-generic/topology.h> |
| 126 | |
#ifdef CONFIG_SMP
#include <asm/cputable.h>
/* SMT capability is a CPU feature bit on powerpc. */
#define smt_capable()		(cpu_has_feature(CPU_FTR_SMT))

#ifdef CONFIG_PPC64
#include <asm/smp.h>

/*
 * Per-CPU sibling/core maps consumed by the generic topology code;
 * the per_cpu variables themselves are defined in the SMP code.
 */
#define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu)		(cpu_to_core_id(cpu))
#endif
#endif
| 139 | |
Arnd Bergmann | 88ced03 | 2005-12-16 22:43:46 +0100 | [diff] [blame] | 140 | #endif /* __KERNEL__ */ |
jdl@freescale.com | dd56fdf | 2005-09-07 15:59:48 -0500 | [diff] [blame] | 141 | #endif /* _ASM_POWERPC_TOPOLOGY_H */ |