/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1


#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* in uapi/linux/mempolicy.h */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* in uapi/linux/mempolicy.h */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
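
/*
 * Illustrative sketch, not a helper defined in this header: which union
 * arm of v is valid depends on @mode (MPOL_* values come from
 * <uapi/linux/mempolicy.h>; for MPOL_DEFAULT, v is left undefined):
 *
 *	switch (pol->mode) {
 *	case MPOL_PREFERRED:
 *		nid = pol->v.preferred_node;
 *		break;
 *	case MPOL_INTERLEAVE:
 *	case MPOL_BIND:
 *		nodes = pol->v.nodes;
 *		break;
 *	}
 */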

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
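
/*
 * Typical pairing (sketch): a lookup that may return a shared policy takes
 * a conditional reference, which the caller drops with mpol_cond_put()
 * once the allocation decision has been made; the put is a no-op unless
 * MPOL_F_SHARED is set:
 *
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *	... use pol ...
 *	mpol_cond_put(pol);
 */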

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
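
/*
 * Example (sketch): a caller that needs its own copy, e.g. when copying a
 * VMA, duplicates the source policy and then owns the new reference.  This
 * sketch assumes __mpol_dup() reports allocation failure via ERR_PTR():
 *
 *	struct mempolicy *npol = mpol_dup(vma_policy(src));
 *	if (IS_ERR(npol))
 *		return PTR_ERR(npol);
 *	dst->vm_policy = npol;	(refcount 1, owned by dst)
 */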

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};
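
/*
 * Example (sketch): because the tree is indexed in pages, a caller first
 * converts a faulting address to a page index within the backing object;
 * for a linear mapping the usual conversion is:
 *
 *	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 */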

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
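
/*
 * Lifecycle sketch for a shared policy tree (error handling and locking
 * elided):
 *
 *	struct shared_policy sp;
 *
 *	mpol_shared_policy_init(&sp, mpol);		(mpol may be NULL)
 *	mpol_set_shared_policy(&sp, vma, new);		(install for vma's range)
 *	pol = mpol_shared_policy_lookup(&sp, idx);	(per-page-index lookup)
 *	mpol_cond_put(pol);
 *	mpol_free_shared_policy(&sp);			(tear down the whole tree)
 */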

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
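
/*
 * Rebind sketch: when a task's allowed nodes change (e.g. its cpuset is
 * moved), callers such as the cpuset code rebind in two steps, per
 * enum mpol_rebind_step, so a usable nodemask stays visible throughout:
 *
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP1);
 *	... update tsk->mems_allowed ...
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP2);
 */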

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);
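
/*
 * Example (sketch, hugetlb-style caller): ask the policy layer where a
 * huge page should come from, then release the conditional reference;
 * gfp_mask here stands for whatever mask the caller would allocate with:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl = huge_zonelist(vma, addr, gfp_mask,
 *					    &mpol, &nodemask);
 *	... allocate from zl, restricted by nodemask ...
 *	mpol_cond_put(mpol);
 */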

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
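
/*
 * Example (sketch): round-tripping a tmpfs-style "mpol=" string.  Note
 * that mpol_parse_str() modifies its argument, so it needs a writable
 * buffer; a zero return is assumed to mean success, matching the error
 * stub at the bottom of this header:
 *
 *	char str[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *pol;
 *
 *	if (!mpol_parse_str(str, &pol)) {
 *		mpol_to_str(buf, sizeof(buf), pol);
 *		mpol_put(pol);
 *	}
 */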

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return 0;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return 0;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}
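
/*
 * Example (sketch): migration paths use this to skip mappings whose pages
 * cannot be moved, e.g. while walking an mm:
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 *		if (!vma_migratable(vma))
 *			continue;
 *		... isolate and migrate pages in [vm_start, vm_end) ...
 *	}
 */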

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
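
/*
 * Usage sketch: a NUMA hinting fault asks whether the faulting page sits
 * where the governing policy wants it.  The !CONFIG_NUMA stub below shows
 * the convention: -1 means "no node preference" (leave the page alone),
 * any other value is the node to migrate the page to:
 *
 *	int nid = mpol_misplaced(page, vma, address);
 *	if (nid != -1)
 *		... try to migrate the page to nid ...
 */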

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new,
				enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

#endif /* CONFIG_NUMA */
#endif