#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
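
/*
 * Minimal userspace sketch (illustrative only; assumes the set_mempolicy()
 * wrapper from libnuma's <numaif.h>): the mode and an optional mode flag
 * are OR'ed together into the single 'int' argument mentioned above:
 *
 *	unsigned long nodemask = 0x3;		nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *		      &nodemask, 8 * sizeof(nodemask));
 */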

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED  (1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL   (1 << 1)	/* preferred local allocation */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
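
/*
 * Informal note on how the fields above relate (a sketch inferred from the
 * field comments, not a normative description): for a task policy installed
 * as, say, MPOL_INTERLEAVE | MPOL_F_STATIC_NODES, 'mode' holds
 * MPOL_INTERLEAVE, 'flags' holds MPOL_F_STATIC_NODES, 'v.nodes' holds the
 * effective interleave nodemask, and 'w.user_nodemask' remembers the mask
 * the user originally passed so it can be reapplied on cpuset rebinds.
 */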

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
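
/*
 * Usage sketch (informal): code that obtains a policy from a lookup which
 * may hand back a shared, reference-counted policy pairs that lookup with
 * mpol_cond_put(); policies without MPOL_F_SHARED are left untouched:
 *
 *	struct mempolicy *pol = some_policy_lookup(...);   hypothetical helper
 *	... use pol to pick nodes for an allocation ...
 *	mpol_cond_put(pol);
 */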

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, unsigned short mode,
				unsigned short flags, nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
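
/*
 * Rough lifecycle of a shared policy tree (an informal sketch of how the
 * declarations above fit together, e.g. for a shmem-style object that
 * embeds a struct shared_policy):
 *
 *	mpol_shared_policy_init(&sp, mode, flags, nodes);	setup
 *	mpol_set_shared_policy(&sp, vma, newpol);	install per-range policy
 *	pol = mpol_shared_policy_lookup(&sp, pgoff);	query at a page index
 *	mpol_cond_put(pol);				drop lookup reference
 *	mpol_free_shared_policy(&sp);			teardown
 */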

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
		unsigned short mode, unsigned short flags, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
			const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif