/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin()
 * and read_mems_allowed_retry() to get a consistent view of mems_allowed,
 * we need to ensure that begin() always gets rewritten before retry() in
 * the disabled -> enabled transition. Otherwise, if local irqs are
 * disabled around the loop, we can deadlock: retry() would compare the
 * latest value of the mems_allowed seqcount against 0, because begin()
 * would still see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reason (we want
 * retry() to stop looking at the real value of mems_allowed.sequence
 * first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}
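
/*
 * Illustrative sketch of the hazard described above (not code from this
 * header). If cpuset_inc() flipped the keys in the opposite order, a
 * task looping with local irqs disabled could see:
 *
 *	cookie = read_mems_allowed_begin();	// begin() not rewritten yet:
 *						// cookie == 0
 *						// <- cpuset_inc() completes here
 *	...
 *	} while (read_mems_allowed_retry(cookie));
 *						// retry() already rewritten:
 *						// compares the live seqcount
 *						// against 0, so it retries
 *						// forever with irqs off
 *
 * Flipping cpusets_pre_enable_key first guarantees that once retry()
 * looks at the real seqcount, begin() has already returned a real cookie.
 */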

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
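
/*
 * Usage sketch (illustrative only, not a definition from this header):
 * an allocator-style walk that skips zones the current task's cpuset
 * does not allow. for_each_zone_zonelist() is the zonelist iterator
 * from <linux/mmzone.h>.
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (!cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		// try to allocate from this zone
 *	}
 */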

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
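
/*
 * Usage sketch (illustrative, loosely modelled on page cache
 * allocation): when page spreading is enabled for the current task,
 * allocate on the spread node instead of node-locally.
 * __alloc_pages_node() and alloc_pages() are from <linux/gfp.h>.
 *
 *	struct page *page;
 *
 *	if (cpuset_do_page_mem_spread())
 *		page = __alloc_pages_node(cpuset_mem_spread_node(), gfp, 0);
 *	else
 *		page = alloc_pages(gfp, 0);
 */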

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
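
/*
 * Usage sketch of the retry pattern described above (illustrative;
 * try_to_alloc() is a hypothetical helper, not a real kernel function):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_to_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */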

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * Disable irqs: if an interrupt taken here entered the
	 * read_mems_allowed_begin()/retry() loop on this CPU, it would
	 * spin forever on the odd (write-in-progress) seqcount.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */