#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
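
/*
 * Illustrative sketch only (not part of this header): a user-space
 * caller might combine a policy mode with the mbind flags roughly as
 * below. `buf`, `len` and `nmask` are hypothetical; the mask covers
 * nodes 0 and 1. MPOL_MF_STRICT fails the call if existing pages
 * violate the policy, while MPOL_MF_MOVE asks the kernel to migrate
 * the offending pages instead.
 *
 *	unsigned long nmask = (1UL << 0) | (1UL << 1);
 *	if (mbind(buf, len, MPOL_INTERLEAVE, &nmask,
 *		  8 * sizeof(nmask), MPOL_MF_STRICT | MPOL_MF_MOVE))
 *		perror("mbind");
 */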

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;	/* See MPOL_* above */
	union {
		struct zonelist  *zonelist;	/* bind */
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave */
		/* undefined for default */
	} v;
	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
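
/*
 * A minimal lifecycle sketch (hypothetical caller, not from this
 * file), assuming mpol_copy() returns a fresh reference or an
 * ERR_PTR() on allocation failure, and mpol_free() drops a reference,
 * destroying the object on the last put:
 *
 *	struct mempolicy *pol = mpol_copy(vma_policy(old_vma));
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	vma_set_policy(new_vma, pol);
 *	...
 *	mpol_free(vma_policy(new_vma));
 */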

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;	/* page index range [start, end) */
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *nodes);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
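
/*
 * Sketch of the page-index convention above (hypothetical caller,
 * modeled on how shmem uses this API): the policy covering byte
 * offset `off` into the segment is looked up by page index, so
 * segments too large for a byte offset still fit in an unsigned
 * long key. The lookup returns a referenced policy (or NULL), so
 * the caller drops the reference when done:
 *
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(sp, off >> PAGE_SHIFT);
 *	...
 *	mpol_free(pol);
 */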

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
extern void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))

#ifdef CONFIG_CPUSET
#define current_cpuset_is_being_rebound() \
				(cpuset_being_rebound == current->cpuset)
#else
#define current_cpuset_is_being_rebound() 0
#endif

extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);
extern unsigned slab_node(struct mempolicy *policy);

extern int policy_zone;	/* highest zone index seen; raised by check_highest_zone() */

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info,
					int policy, nodemask_t *nodes)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_policy(struct mempolicy *pol,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

#define set_cpuset_being_rebound(x) do {} while (0)

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif