#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
 * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

enum mpol_rebind_step {
	MPOL_REBIND_ONCE,	/* do rebind work at once (not in two steps) */
	MPOL_REBIND_STEP1,	/* first step: set all the newly allowed nodes */
	MPOL_REBIND_STEP2,	/* second step: clear all the disallowed nodes */
	MPOL_REBIND_NSTEP,
};

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
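
/*
 * Illustrative userspace sketch (not part of this header): a mode and an
 * optional mode flag are OR'ed into the single 'int' passed to the syscall.
 * Assumes the libnuma syscall wrappers from <numaif.h>; the nodemask value
 * below is hypothetical.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 2);	// nodes 0 and 2
 *	if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *			  &nodemask, 8 * sizeof(nodemask)) < 0)
 *		perror("set_mempolicy");
 */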

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
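
/*
 * Illustrative sketch (assumes libnuma's <numaif.h> wrapper; 'addr' is a
 * hypothetical address inside a mapped region): MPOL_F_ADDR requests the
 * policy of the VMA containing addr rather than the calling task's policy.
 *
 *	int mode;
 *	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR) == 0)
 *		printf("policy mode at %p: %d\n", addr, mode);
 */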

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
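
/*
 * Illustrative sketch ('buf' and 'len' are hypothetical; assumes <numaif.h>):
 * bind an existing mapping to node 1 and migrate this process's pages that
 * are already present; with MPOL_MF_STRICT the call fails with EIO if some
 * pages could not be made to conform.
 *
 *	unsigned long nodemask = 1UL << 1;		// node 1 only
 *	if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
 *		  MPOL_MF_STRICT | MPOL_MF_MOVE) < 0)
 *		perror("mbind");
 */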

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used.  Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state.  All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		preferred_node;	/* preferred */
		nodemask_t	nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}
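
/*
 * Illustrative in-kernel sketch of the reference-counting convention
 * described above ('pol' obtained by some hypothetical means):
 *
 *	mpol_get(pol);		// take a reference before stashing pol
 *	...			// use pol
 *	mpol_put(pol);		// drop it; __mpol_put() frees at refcount 0
 */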

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas.  The vmas
 * carry the policy.  As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

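/*
 * Illustrative sketch of the shared-policy lifecycle (hypothetical caller,
 * loosely modeled on how a shmem-style filesystem might use it):
 *
 *	struct shared_policy sp;
 *	mpol_shared_policy_init(&sp, NULL);	// start with no policy
 *	...					// mpol_set_shared_policy() per range
 *	pol = mpol_shared_policy_lookup(&sp, pgoff);	// referenced result, if any
 *	mpol_cond_put(pol);
 *	mpol_free_shared_policy(&sp);		// teardown
 */
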
struct mempolicy *get_vma_policy(struct task_struct *tsk,
		struct vm_area_struct *vma, unsigned long addr);
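
/*
 * Illustrative sketch pairing get_vma_policy() with mpol_cond_put(): a
 * shared policy returned here carries a reference that must be dropped
 * (hypothetical snippet from a page-allocation path):
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	...					// allocate according to pol
 *	mpol_cond_put(pol);			// unref only if MPOL_F_SHARED
 */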

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	/*
	 * Migration allocates pages in the highest zone.  If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new,
					enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif