#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

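/*
 * Both wrappers below short-circuit to "allowed" when at most one
 * cpuset exists, avoiding the out-of-line call on systems that do not
 * use cpusets. (Roughly: the hardwall check consults only
 * current->mems_allowed, while the softwall check may also permit
 * kernel allocations on nodes of the nearest hardwalled ancestor
 * cpuset.)
 */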
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
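
/*
 * Illustrative sketch only, not part of this header's API: a page
 * allocator zonelist scan might consult the softwall check to skip
 * zones on nodes the current cpuset forbids. @zonelist and @highidx
 * are placeholder names here.
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		...attempt allocation from this zone...
 *	}
 */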

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * get_mems_allowed() is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail spuriously, causing an artificial process failure. A retry loop
 * around get_mems_allowed() and put_mems_allowed() prevents these
 * artificial failures.
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns false, the operation that took place after
 * get_mems_allowed() may have failed. It is up to the caller to retry
 * the operation if appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
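
/*
 * Typical usage is a retry loop around an allocation; a minimal sketch
 * (illustrative only, @gfp and @order are placeholder parameters):
 *
 *	unsigned int seq;
 *	struct page *page;
 *
 *	do {
 *		seq = get_mems_allowed();
 *		page = alloc_pages(gfp, order);
 *	} while (!page && !put_mems_allowed(seq));
 *
 * If the allocation failed while mems_allowed was changing underneath
 * us, put_mems_allowed() returns false and the loop retries.
 */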

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	do_set_cpus_allowed(p, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int get_mems_allowed(void)
{
	return 0;
}

static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */