#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX	MPOL_INTERLEAVE

/* Flags for get_mem_policy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
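
/*
 * Usage note (illustrative sketch only, not part of this header): userspace
 * applies these policies and flags through the mbind() system call.
 * Assuming the libnuma <numaif.h> wrapper, binding a mapping to node 0 and
 * verifying pages that already exist in the range could look roughly like:
 *
 *	unsigned long nodemask = 1UL << 0;
 *	if (mbind(addr, length, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_STRICT) < 0)
 *		perror("mbind");
 *
 * MPOL_MF_STRICT asks the kernel to report an error when existing pages in
 * the range do not conform to the new policy.
 */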

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;			/* See MPOL_* above */
	union {
		struct zonelist	*zonelist;	/* bind */
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave */
		/* undefined for default */
	} v;
};
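
/*
 * A rough sketch of the policy selection described above (the real lookup
 * is get_vma_policy(), declared further down; this outline glosses over
 * shared mappings that supply their own vm_ops based lookup):
 *
 *	struct mempolicy *pol = current->mempolicy;
 *
 *	if (vma && vma->vm_policy)
 *		pol = vma->vm_policy;
 *	if (!pol)
 *		pol = &default_policy;
 */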

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}
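
/*
 * A minimal usage sketch, assuming mpol_free() drops the reference that
 * mpol_get() takes (all of these helpers accept a NULL policy, so the
 * MPOL_DEFAULT fast path needs no checks in the caller):
 *
 *	mpol_get(pol);			share an existing policy
 *	...				use pol for allocations
 *	mpol_free(pol);			drop the reference again
 *
 * A private, modifiable duplicate is obtained with mpol_copy(); for
 * MPOL_BIND this also duplicates the zonelist, as noted above.
 */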

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages,
 * not bytes, so that we can work with shared memory segments larger than
 * an unsigned long can address in bytes.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);
}

int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
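
/*
 * A minimal sketch of how an object backing a shared memory segment might
 * use this (e.g. the tmpfs/shmem code); indices are page offsets into the
 * segment, not byte addresses, as explained above:
 *
 *	struct shared_policy sp;
 *	struct mempolicy *pol;
 *
 *	mpol_shared_policy_init(&sp);
 *	...
 *	pol = mpol_shared_policy_lookup(&sp, pgoff);
 *	...
 *	mpol_free_shared_policy(&sp);
 *
 * mpol_set_shared_policy() installs a policy for the page range described
 * by a (pseudo) vma.
 */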

struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new);
extern struct mempolicy default_policy;
extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr);

extern int policy_zone;

static inline void check_highest_zone(int k)
{
	if (k > policy_zone)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void numa_policy_rebind(const nodemask_t *old,
				const nodemask_t *new)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
		unsigned long addr)
{
	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
}

static inline void check_highest_zone(int k)
{
}
#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif