#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in the system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

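/*
 * Fast path for the wrappers below: number_of_cpusets <= 1 means only the
 * root cpuset exists, so every node is allowed and the out-of-line
 * __cpuset_node_allowed_*() check can be skipped.  Roughly, the "softwall"
 * variant lets kernel-internal allocations fall back to nodes outside the
 * current cpuset unless a hardwall applies, while the "hardwall" variant
 * requires the node to be in the task's mems_allowed; kernel/cpuset.c
 * documents the exact semantics.
 */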
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

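/*
 * Per-cpuset memory-pressure accounting is off in the common case: the
 * macro below tests the global cpuset_memory_pressure_enabled flag first,
 * so the out-of-line __cpuset_memory_pressure_bump() only runs once the
 * feature has been switched on (via the root cpuset's
 * memory_pressure_enabled control file).
 */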
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation.  mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing process failure.  A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed.  It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

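/*
 * Illustrative sketch of the intended caller pattern (assumed, not a
 * declaration from this header): wrap the operation in a begin/retry loop
 * so that a concurrent mems_allowed update cannot surface as a spurious
 * failure.  try_alloc() below stands in for whatever allocation step
 * consults mems_allowed.
 *
 *	unsigned int cookie;
 *	struct page *page;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		page = try_alloc(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */

/*
 * set_mems_allowed() is the write side of the mems_allowed_seq protocol
 * above: the new nodemask is published under the seqcount with interrupts
 * disabled, presumably so that a reader entered from interrupt context on
 * the same CPU cannot spin on a write in progress, and under task_lock()
 * to serialize with other updaters of current's cpuset state.
 */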
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */