/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* see MPOL_* in <uapi/linux/mempolicy.h> */
	unsigned short flags;	/* see set_mempolicy() MPOL_F_* there */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};
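
/*
 * Illustrative sketch, not part of this header: the duplicate/release
 * cycle described above.  mpol_dup() hands back a copy with refcnt == 1
 * (or an ERR_PTR on allocation failure), and mpol_put() drops a
 * reference, freeing the object once the count reaches zero.
 *
 *	struct mempolicy *new = mpol_dup(old);	(old: a caller-held policy)
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	... use new ...
 *	mpol_put(new);
 */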

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}
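
/*
 * Illustrative sketch, not part of this header: lookups that can hand
 * back a shared policy return it with an extra reference held.
 * mpol_cond_put() drops that reference only when MPOL_F_SHARED is set,
 * so one call site copes with both shared and non-shared policies.
 *
 *	pol = __get_vma_policy(vma, addr);
 *	... allocate pages according to pol ...
 *	mpol_cond_put(pol);		(unrefs only if pol is shared)
 */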

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};
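
/*
 * Illustrative sketch, not part of this header: since the tree is
 * indexed in pages, a caller looking up the policy for a byte offset
 * into a shared segment converts it to a page index first.  "info" is
 * a hypothetical object embedding a struct shared_policy as "policy".
 *
 *	pgoff_t idx = offset >> PAGE_SHIFT;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	... pol is NULL when no policy covers idx ...
 *	mpol_cond_put(pol);
 */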

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
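
/*
 * Illustrative sketch, not part of this header: mpol_parse_str() turns
 * a tmpfs-style policy string into a mempolicy and mpol_to_str()
 * formats one back into a buffer.  The option string below is only an
 * example; it is parsed in place, so it must be writable.
 *
 *	char str[] = "interleave:0-3";
 *	struct mempolicy *mpol;
 *	char buf[64];
 *
 *	if (!mpol_parse_str(str, &mpol))	(returns 0 on success)
 *		mpol_to_str(buf, sizeof(buf), mpol);
 */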

/* Check if a vma is migratable */
static inline bool vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		return false;

	/*
	 * DAX device mappings require predictable access latency, so avoid
	 * incurring periodic faults.
	 */
	if (vma_is_dax(vma))
		return false;

#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if (vma->vm_flags & VM_HUGETLB)
		return false;
#endif

	/*
	 * Migration allocates pages in the highest zone. If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return false;
	return true;
}

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new,
					enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}
#endif /* CONFIG_NUMA */
#endif