#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

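/*
 * Illustrative fast-path pattern (a sketch, not a definition from this
 * header): because cpusets_enabled() is a static key, callers can make
 * the cpuset check vanish from the hot path while no cpuset beyond the
 * always-present root exists. The helper below is hypothetical.
 *
 *	static bool my_node_check(int node, gfp_t gfp_mask)
 *	{
 *		if (!cpusets_enabled())
 *			return true;
 *		return __cpuset_node_allowed_softwall(node, gfp_mask) != 0;
 *	}
 *
 * cpuset_inc() and cpuset_dec() flip the key as cpusets are created
 * and destroyed.
 */
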
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

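/*
 * Illustrative caller pattern (a sketch; loop details vary by page
 * allocator version): the softwall check is typically consulted while
 * walking the zonelist, skipping zones the current cpuset forbids.
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 *					high_zoneidx, nodemask) {
 *		if (cpusets_enabled() &&
 *		    !cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */
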
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel and, depending on the new value, an operation
 * can fail, potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a
 * concurrent update of mems_allowed. It is up to the caller to retry
 * the operation if appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

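/*
 * Typical retry pattern (an illustrative sketch; the allocation helper
 * below is hypothetical): redo the operation whenever a concurrent
 * mems_allowed update raced with it.
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_allocation(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */
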
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
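	/*
	 * Interrupts are disabled across the seqcount write: readers of
	 * mems_allowed_seq may run from allocation paths in irq context,
	 * and a reader entered from an irq on this CPU would otherwise
	 * spin forever against a write in progress.
	 */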
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */