#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/* Policies */
#define MPOL_DEFAULT	0
#define MPOL_PREFERRED	1
#define MPOL_BIND	2
#define MPOL_INTERLEAVE	3

#define MPOL_MAX MPOL_INTERLEAVE

/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL node instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */

/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
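
/*
 * Illustrative sketch (user-space side; an assumption, not kernel code):
 * the MPOL_* constants and the flags above form the ABI consumed by the
 * mbind()/set_mempolicy()/get_mempolicy() system calls.  Binding a mapping
 * to nodes 0 and 1 and then querying the policy at an address might look
 * roughly like this:
 *
 *	unsigned long nodes = 0x3;		// nodes 0 and 1
 *	mbind(addr, len, MPOL_BIND, &nodes, sizeof(nodes) * 8, MPOL_MF_STRICT);
 *
 *	int mode;
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR);
 */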

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct vm_area_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be associated either with a process or with a VMA.
 * For VMA-related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem. For allocating in the interleave policy the page_table_lock
 * must also be acquired to protect il_next.
 *
 * Freeing policy:
 * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
 * All other policies don't have any external state. mpol_free() handles this.
 *
 * Copying policy objects:
 * For MPOL_BIND the zonelist must always be duplicated. mpol_copy() does this.
 */
struct mempolicy {
	atomic_t refcnt;
	short policy;	/* See MPOL_* above */
	union {
		struct zonelist	*zonelist;	/* bind */
		short		preferred_node;	/* preferred */
		DECLARE_BITMAP(nodes, MAX_NUMNODES); /* interleave */
		/* undefined for default */
	} v;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_free(struct mempolicy *pol);
static inline void mpol_free(struct mempolicy *pol)
{
	if (pol)
		__mpol_free(pol);
}

extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_copy(pol);
	return pol;
}
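
/*
 * Illustrative sketch only (an assumption modeled on how a new context can
 * take over a task's policy; parent/child are made-up names).  The slow-path
 * copy is assumed to report failure via ERR_PTR:
 *
 *	struct mempolicy *pol = mpol_copy(parent->mempolicy);
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);		// duplicating MPOL_BIND state failed
 *	child->mempolicy = pol;
 *	...
 *	mpol_free(child->mempolicy);		// drop it again when done
 */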

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}
#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
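
/*
 * Illustrative sketch (an assumption, not the actual vma-merging code): a
 * helper deciding whether two adjacent vmas may be merged would typically
 * refuse when their policies differ:
 *
 *	if (!vma_mpol_equal(prev, next))
 *		return 0;	// keep the vmas separate
 */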

/* Could later add inheritance of the process policy here. */

#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)

/*
 * Hugetlb policy. i386 hugetlb so far works with node numbers
 * instead of zone lists, so give it special interfaces for now.
 */
extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
		unsigned long addr);
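
/*
 * Illustrative sketch (an assumption; the real hugetlb allocator may differ):
 * start from the policy's first node and fall back if the policy does not
 * allow it for this vma/address:
 *
 *	int nid = mpol_first_node(vma, addr);	// preferred node to try first
 *	if (!mpol_node_valid(nid, vma, addr))
 *		nid = ...;			// pick a node the policy allows
 *	... try to take a huge page from node nid ...
 */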

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can handle shared memory segments bigger than an
 * unsigned long can span in bytes.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);
}

int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
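
/*
 * Illustrative sketch (an assumption; field and variable names are made up,
 * loosely following how a shared memory object could drive this interface):
 *
 *	mpol_shared_policy_init(&info->policy);	// when the object is created
 *
 *	// on an mbind() of a range, install the new policy for that range
 *	err = mpol_set_shared_policy(&info->policy, vma, new_pol);
 *
 *	// per-page allocation: look up the policy by page index
 *	pol = mpol_shared_policy_lookup(&info->policy, pgoff);
 *
 *	mpol_free_shared_policy(&info->policy);	// when the object is destroyed
 */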

struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr);
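
/*
 * Illustrative sketch (an assumption, mirroring what a per-vma allocation
 * path would do): resolve the effective policy for a faulting address,
 * falling back to the task policy when the vma carries none:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... pick a node or zonelist from pol and allocate the page ...
 */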

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern struct mempolicy default_policy;

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}
#define vma_mpol_equal(a,b) 1

#define mpol_set_vma_default(vma) do {} while(0)

static inline void mpol_free(struct mempolicy *p)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_copy(struct mempolicy *old)
{
	return NULL;
}

static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
{
	return numa_node_id();
}

static inline int
mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
{
	return 1;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *info)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif