#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE
/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
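
/*
 * Usage sketch (not part of this header): from user space these modes and
 * flags are passed to the mbind(2) and set_mempolicy(2) system calls.  A
 * minimal example, assuming the wrappers from libnuma's <numaif.h> and
 * omitting error handling:
 *
 *	unsigned long nodemask = 1UL << 0;	// allow node 0 only
 *
 *	// Interleave this process's future allocations across the mask.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8);
 *
 *	// Bind an existing mapping; MPOL_MF_STRICT makes mbind() fail
 *	// if pages in the range already live on other nodes.
 *	mbind(addr, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *	      MPOL_MF_STRICT);
 */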

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be associated either with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem. For allocating in the interleave policy the page_table_lock
 * must also be acquired to protect il_next.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
        atomic_t refcnt;
        short policy;   /* See MPOL_* above */
        union {
                struct zonelist *zonelist;      /* bind */
                short preferred_node;           /* preferred */
                DECLARE_BITMAP(nodes, MAX_NUMNODES); /* interleave */
                /* undefined for default */
        } v;
};
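
/*
 * Illustration (a sketch, not code from this file): an allocator honoring
 * a mempolicy dispatches on pol->policy and reads the matching union
 * member.  pick_interleave_node() is a hypothetical helper standing in
 * for the il_next bookkeeping described above.
 *
 *	switch (pol->policy) {
 *	case MPOL_BIND:
 *		zl = pol->v.zonelist;		// only these zones are allowed
 *		break;
 *	case MPOL_PREFERRED:
 *		nid = pol->v.preferred_node;	// try this node first
 *		break;
 *	case MPOL_INTERLEAVE:
 *		nid = pick_interleave_node(pol); // hypothetical: next bit in v.nodes
 *		break;
 *	default:				// MPOL_DEFAULT
 *		nid = numa_node_id();		// node of the current CPU
 *	}
 */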

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
        if (pol)
                __mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_copy(pol);
        return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
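
/*
 * Usage sketch (an assumption modeled on typical callers, e.g. VMA
 * duplication): mpol_copy() returns a new reference that must later be
 * dropped with mpol_free(), and __mpol_copy() may return an ERR_PTR on
 * allocation failure, so the result should be checked:
 *
 *	struct mempolicy *pol = mpol_copy(vma_policy(old_vma));
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	vma_set_policy(new_vma, pol);
 */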

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return 1;
        return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Hugetlb policy. i386 hugetlb so far works with node numbers
 * instead of zone lists, so give it special interfaces for now.
 */
extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
                unsigned long addr);
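
/*
 * Usage sketch (an assumption): a hugetlb allocator working with node
 * numbers might start at the policy's first node and fall back to any
 * node the policy accepts; has_free_huge_pages() is hypothetical.
 *
 *	int nid = mpol_first_node(vma, addr);
 *
 *	if (!has_free_huge_pages(nid))
 *		for (nid = 0; nid < MAX_NUMNODES; nid++)
 *			if (mpol_node_valid(nid, vma, addr) &&
 *			    has_free_huge_pages(nid))
 *				break;
 */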

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        spinlock_t lock;
};

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
        info->root = RB_ROOT;
        spin_lock_init(&info->lock);
}

int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
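
/*
 * Usage sketch (an assumption, modeled on how a shared memory filesystem
 * could consult the tree): look up the policy for page index idx, hang it
 * off a pseudo vma for the allocation, then drop the reference the lookup
 * took.  Here info->policy is assumed to be a struct shared_policy
 * embedded in the filesystem's per-inode data.
 *
 *	struct vm_area_struct pvma;
 *
 *	memset(&pvma, 0, sizeof(pvma));
 *	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
 *	... allocate the page through pvma ...
 *	mpol_free(pvma.vm_policy);
 */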

extern void numa_default_policy(void);
extern void numa_policy_init(void);

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
        return NULL;
}

static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
{
        return numa_node_id();
}

static inline int
mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
{
        return 1;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif