#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

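/*
 * A jump label tracking how many cpusets are in use. Fast paths test
 * cpusets_enabled() so that cpuset enforcement costs (almost) nothing
 * while only the root cpuset exists; cpuset_inc()/cpuset_dec() maintain
 * the count as cpusets are created and destroyed.
 */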
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

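/*
 * The inline wrappers below short-circuit the out-of-line checks: when
 * nr_cpusets() <= 1 only the root cpuset exists, every node is allowed,
 * and the __cpuset_node_allowed_*() call is skipped entirely.
 */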
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

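/*
 * Bump the memory pressure counter of current's cpuset, but only when
 * per-cpuset pressure accounting has been switched on (via the
 * memory_pressure_enabled control file in the root cpuset).
 */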
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
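
/*
 * Illustrative retry loop (a sketch only; try_alloc_page() stands in for
 * any operation, such as a page allocation, that consults mems_allowed):
 *
 *	struct page *page;
 *	unsigned int seq;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		page = try_alloc_page();
 *	} while (!page && read_mems_allowed_retry(seq));
 */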
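/*
 * Writer side of the mems_allowed seqcount. Interrupts are disabled
 * across the update so that a reader entered from interrupt context on
 * this CPU cannot spin forever against a half-finished write (the
 * likely reason for the local_irq_save() below, per the seqcount rules).
 */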
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */