/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
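
/*
 * Illustrative sketch (not part of the original file): user space drives the
 * policies described above through the set_mempolicy()/mbind() syscalls
 * implemented below, typically via the <numaif.h> wrappers shipped with
 * libnuma.  Assuming a machine with memory nodes 0 and 1, a caller might do:
 *
 *	unsigned long mask = 0x3;		(bits for nodes 0 and 1)
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *	void *p = mmap(NULL, len, PROT_READ|PROT_WRITE,
 *		       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &mask, 8 * sizeof(mask), MPOL_MF_STRICT);
 *
 * The maxnode argument tells the kernel how many bits to look at in the
 * mask; note that get_nodes() below actually consumes maxnode - 1 bits.
 */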

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful about that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
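	/* Illustrative example (not in the original comment): with
	   *nodes = {0,1} and zones DMA < NORMAL, the loop below produces
	   N0_NORMAL, N1_NORMAL, N0_DMA, N1_DMA, NULL -- highest zones
	   first, round-robin across the bound nodes at each zone level. */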
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		nodes_and(policy->v.nodes, policy->v.nodes,
			  node_states[N_HIGH_MEMORY]);
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				 *nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory off that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning from_tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
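
/*
 * Illustrative example (not in the original comment): with from = {0,1}
 * and to = {1,2}, node_remap() maps 0->1 and 1->2.  The first scan picks
 * <1,2>, since dest 2 lies outside tmp, so node 1 is emptied onto node 2
 * before the second pass moves node 0 onto the now-vacated node 1.
 */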

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;

	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
			page_address_in_vma(page, vma));
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned long mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start + len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_pid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}
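
/*
 * Illustrative sketch (not part of the original file): a user space caller
 * can ask which node currently backs a given page with
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * which takes the MPOL_F_ADDR path through do_get_mempolicy() and
 * lookup_node() above; on success, node holds the page's node id.
 */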

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

1096
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001097/*
1098 * get_vma_policy(@task, @vma, @addr)
1099 * @task - task for fallback if vma policy == default
1100 * @vma - virtual memory area whose policy is sought
1101 * @addr - address in @vma for shared policy lookup
1102 *
1103 * Returns effective policy for a VMA at specified address.
1104 * Falls back to @task or system default policy, as necessary.
1105 * Returned policy has extra reference count if shared, vma,
1106 * or some other task's policy [show_numa_maps() can pass
1107 * @task != current]. It is the caller's responsibility to
1108 * free the reference in these cases.
1109 */
Christoph Lameter48fce342006-01-08 01:01:03 -08001110static struct mempolicy * get_vma_policy(struct task_struct *task,
1111 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001113 struct mempolicy *pol = task->mempolicy;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001114 int shared_pol = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115
1116 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001117 if (vma->vm_ops && vma->vm_ops->get_policy) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001118 pol = vma->vm_ops->get_policy(vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001119 shared_pol = 1; /* if pol non-NULL, add ref below */
1120 } else if (vma->vm_policy &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001121 vma->vm_policy->policy != MPOL_DEFAULT)
1122 pol = vma->vm_policy;
1123 }
1124 if (!pol)
1125 pol = &default_policy;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001126 else if (!shared_pol && pol != current->mempolicy)
1127 mpol_get(pol); /* vma or other task's policy */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 return pol;
1129}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	int pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		return zone_to_nid(policy->v.zonelist->zones[0]);

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}
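
/*
 * Illustrative example (not in the original): for pol->v.nodes = {0,2,5}
 * and off = 7, nnodes = 3 and target = 7 % 3 = 1, so the walk above stops
 * on the second set bit and returns node 2.
 */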

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
 *
 * Returns a zonelist suitable for a huge page allocation.
 * If the effective policy is 'BIND, returns pointer to policy's zonelist.
 * If it is also a policy for which get_vma_policy() returns an extra
 * reference, we must hold that reference until after allocation.
 * In that case, return policy via @mpol so hugetlb allocation can drop
 * the reference.  For non-'BIND referenced policies, we can/do drop the
 * reference here, so the caller doesn't need to know about the special case
 * for default and current task policy.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	*mpol = NULL;		/* probably no unref needed */
	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		__mpol_free(pol);		/* finished with pol */
		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
	}

	zl = zonelist_policy(GFP_HIGHUSER, pol);
	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
		if (pol->policy != MPOL_BIND)
			__mpol_free(pol);	/* finished with pol */
		else
			*mpol = pol;	/* unref needed after allocation */
	}
	return zl;
}
#endif

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0])
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER     user allocation.
 *	%GFP_KERNEL   kernel allocations,
 *	%GFP_HIGHMEM  highmem/user allocations,
 *	%GFP_FS       allocation should not call back into a file system.
 *	%GFP_ATOMIC   don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL the caller must hold down_read on the mmap_sem of the
 * mm_struct of the VMA to prevent it from going away. Should be used for
 * all allocations for pages that will be mapped into
 * user space. Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	cpuset_update_task_memory_state();

	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
		return alloc_page_interleave(gfp, 0, nid);
	}
	zl = zonelist_policy(gfp, pol);
	if (pol != &default_policy && pol != current->mempolicy) {
		/*
		 * slow path: ref counted policy -- shared or vma
		 */
		struct page *page = __alloc_pages(gfp, 0, zl);
		__mpol_free(pol);
		return page;
	}
	/*
	 * fast path: default or task policy
	 */
	return __alloc_pages(gfp, 0, zl);
}

/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER     user allocation,
 *	%GFP_KERNEL   kernel allocation,
 *	%GFP_HIGHMEM  highmem allocation,
 *	%GFP_FS       don't call back into a file system.
 *	%GFP_ATOMIC   don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate pages from the kernel page pool and, when not in interrupt
 * context, apply the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
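
/*
 * Illustrative note: on CONFIG_NUMA kernels the generic alloc_pages()
 * wrapper resolves to alloc_pages_current(), so ordinary callers pick
 * up the task policy transparently, e.g.:
 *
 *	struct page *pages = alloc_pages(GFP_KERNEL, 2);  // 4 pages
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	__free_pages(pages, 2);
 */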

/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed(). This
 * keeps mempolicies cpuset relative after the cpuset moves. See
 * further kernel/cpuset.c update_nodemask().
 */
void *cpuset_being_rebound;

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
	}
	return new;
}

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of a mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}
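
/*
 * Refcounting sketch (illustrative): mempolicies are shared by
 * reference. Code that stores a policy takes a reference with
 * mpol_get() and drops it with mpol_free(), which falls through to
 * __mpol_free() above on the last put:
 *
 *	mpol_get(pol);		// e.g. n->policy = pol in sp_alloc()
 *	...
 *	mpol_free(pol);		// frees the MPOL_BIND zonelist and the
 *				// policy itself once refcnt hits zero
 */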

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
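
/*
 * Worked example (illustrative): with ranges [0,4), [4,8) and [10,16)
 * in the tree, sp_lookup(sp, 2, 12) first lands on some intersecting
 * node, then walks rb_prev() while the predecessor still overlaps the
 * query, so it returns the [0,4) node -- the first element
 * intersecting [2,12).
 */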

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_free(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_free(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}
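
/*
 * Worked example (illustrative): suppose the tree holds one node
 * covering [0,16) with policy A and shared_policy_replace() is called
 * for [4,8) with policy B. The old node spans the whole new range, so
 * it is split: a new2 node [8,16) carrying policy A is allocated
 * (dropping sp->lock for the GFP_KERNEL allocation and restarting),
 * the old node is trimmed to [0,4), new2 is inserted, and finally the
 * [4,8) node with policy B goes in. The tree ends up as
 * [0,4)=A, [4,8)=B, [8,16)=A.
 */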

void mpol_shared_policy_init(struct shared_policy *info, int policy,
			     nodemask_t *policy_nodes)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);

	if (policy != MPOL_DEFAULT) {
		struct mempolicy *newpol;

		/* Falls back to MPOL_DEFAULT on any error */
		newpol = mpol_new(policy, policy_nodes);
		if (!IS_ERR(newpol)) {
			/* Create pseudo-vma that contains just the policy */
			struct vm_area_struct pvma;

			memset(&pvma, 0, sizeof(struct vm_area_struct));
			/* Policy covers entire file */
			pvma.vm_end = TASK_SIZE;
			mpol_set_shared_policy(info, &pvma, newpol);
			mpol_free(newpol);
		}
	}
}
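
/*
 * Usage sketch (illustrative): tmpfs initializes the per-inode shared
 * policy when an inode is created, so a mount like
 *
 *	mount -t tmpfs -o mpol=interleave tmpfs /dev/shm
 *
 * ends up calling something along the lines of
 *
 *	mpol_shared_policy_init(&info->policy, sbinfo->policy,
 *				&sbinfo->policy_nodes);
 *
 * and pages of files on that mount inherit the policy.
 */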
1632
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633int mpol_set_shared_policy(struct shared_policy *info,
1634 struct vm_area_struct *vma, struct mempolicy *npol)
1635{
1636 int err;
1637 struct sp_node *new = NULL;
1638 unsigned long sz = vma_pages(vma);
1639
Paul Mundt140d5a42007-07-15 23:38:16 -07001640 pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 vma->vm_pgoff,
1642 sz, npol? npol->policy : -1,
Paul Mundt140d5a42007-07-15 23:38:16 -07001643 npol ? nodes_addr(npol->v.nodes)[0] : -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
1645 if (npol) {
1646 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1647 if (!new)
1648 return -ENOMEM;
1649 }
1650 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1651 if (err && new)
1652 kmem_cache_free(sn_cache, new);
1653 return err;
1654}
1655
1656/* Free a backing policy store on inode delete. */
1657void mpol_free_shared_policy(struct shared_policy *p)
1658{
1659 struct sp_node *n;
1660 struct rb_node *next;
1661
1662 if (!p->root.rb_node)
1663 return;
1664 spin_lock(&p->lock);
1665 next = rb_first(&p->root);
1666 while (next) {
1667 n = rb_entry(next, struct sp_node, nd);
1668 next = rb_next(&n->nd);
Andi Kleen90c50292005-07-27 11:43:50 -07001669 rb_erase(&n->nd, &p->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 mpol_free(n->policy);
1671 kmem_cache_free(sn_cache, n);
1672 }
1673 spin_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674}
1675
1676/* assumes fs == KERNEL_DS */
1677void __init numa_policy_init(void)
1678{
Paul Mundtb71636e22007-07-15 23:38:15 -07001679 nodemask_t interleave_nodes;
1680 unsigned long largest = 0;
1681 int nid, prefer = 0;
1682
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 policy_cache = kmem_cache_create("numa_policy",
1684 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09001685 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686
1687 sn_cache = kmem_cache_create("shared_policy_node",
1688 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09001689 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
Paul Mundtb71636e22007-07-15 23:38:15 -07001691 /*
1692 * Set interleaving policy for system init. Interleaving is only
1693 * enabled across suitably sized nodes (default is >= 16MB), or
1694 * fall back to the largest node if they're all smaller.
1695 */
1696 nodes_clear(interleave_nodes);
Christoph Lameter56bbd652007-10-16 01:25:35 -07001697 for_each_node_state(nid, N_HIGH_MEMORY) {
Paul Mundtb71636e22007-07-15 23:38:15 -07001698 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Paul Mundtb71636e22007-07-15 23:38:15 -07001700 /* Preserve the largest node */
1701 if (largest < total_pages) {
1702 largest = total_pages;
1703 prefer = nid;
1704 }
1705
1706 /* Interleave this node? */
1707 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1708 node_set(nid, interleave_nodes);
1709 }
1710
1711 /* All too small, use the largest */
1712 if (unlikely(nodes_empty(interleave_nodes)))
1713 node_set(prefer, interleave_nodes);
1714
1715 if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 printk("numa_policy_init: interleaving failed\n");
1717}
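
/*
 * Arithmetic sketch for the 16MB cutoff above (illustrative): with 4KB
 * pages, PAGE_SHIFT is 12, so a node qualifies for boot-time interleave
 * when node_present_pages(nid) << 12 >= 16 << 20, i.e. when it has at
 * least 4096 present pages (16MB / 4KB).
 */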
1718
Christoph Lameter8bccd852005-10-29 18:16:59 -07001719/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720void numa_default_policy(void)
1721{
Christoph Lameter8bccd852005-10-29 18:16:59 -07001722 do_set_mempolicy(MPOL_DEFAULT, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723}
Paul Jackson68860ec2005-10-30 15:02:36 -08001724
1725/* Migrate a policy to a different set of nodes */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001726static void mpol_rebind_policy(struct mempolicy *pol,
1727 const nodemask_t *newmask)
Paul Jackson68860ec2005-10-30 15:02:36 -08001728{
Paul Jackson74cb2152006-01-08 01:01:56 -08001729 nodemask_t *mpolmask;
Paul Jackson68860ec2005-10-30 15:02:36 -08001730 nodemask_t tmp;
1731
1732 if (!pol)
1733 return;
Paul Jackson74cb2152006-01-08 01:01:56 -08001734 mpolmask = &pol->cpuset_mems_allowed;
1735 if (nodes_equal(*mpolmask, *newmask))
1736 return;
Paul Jackson68860ec2005-10-30 15:02:36 -08001737
1738 switch (pol->policy) {
1739 case MPOL_DEFAULT:
1740 break;
1741 case MPOL_INTERLEAVE:
Paul Jackson74cb2152006-01-08 01:01:56 -08001742 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
Paul Jackson68860ec2005-10-30 15:02:36 -08001743 pol->v.nodes = tmp;
Paul Jackson74cb2152006-01-08 01:01:56 -08001744 *mpolmask = *newmask;
1745 current->il_next = node_remap(current->il_next,
1746 *mpolmask, *newmask);
Paul Jackson68860ec2005-10-30 15:02:36 -08001747 break;
1748 case MPOL_PREFERRED:
1749 pol->v.preferred_node = node_remap(pol->v.preferred_node,
Paul Jackson74cb2152006-01-08 01:01:56 -08001750 *mpolmask, *newmask);
1751 *mpolmask = *newmask;
Paul Jackson68860ec2005-10-30 15:02:36 -08001752 break;
1753 case MPOL_BIND: {
1754 nodemask_t nodes;
1755 struct zone **z;
1756 struct zonelist *zonelist;
1757
1758 nodes_clear(nodes);
1759 for (z = pol->v.zonelist->zones; *z; z++)
Christoph Lameter89fa3022006-09-25 23:31:55 -07001760 node_set(zone_to_nid(*z), nodes);
Paul Jackson74cb2152006-01-08 01:01:56 -08001761 nodes_remap(tmp, nodes, *mpolmask, *newmask);
Paul Jackson68860ec2005-10-30 15:02:36 -08001762 nodes = tmp;
1763
1764 zonelist = bind_zonelist(&nodes);
1765
1766 /* If no mem, then zonelist is NULL and we keep old zonelist.
1767 * If that old zonelist has no remaining mems_allowed nodes,
1768 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
1769 */
1770
KAMEZAWA Hiroyuki8af5e2e2007-02-20 13:57:49 -08001771 if (!IS_ERR(zonelist)) {
Paul Jackson68860ec2005-10-30 15:02:36 -08001772 /* Good - got mem - substitute new zonelist */
1773 kfree(pol->v.zonelist);
1774 pol->v.zonelist = zonelist;
1775 }
Paul Jackson74cb2152006-01-08 01:01:56 -08001776 *mpolmask = *newmask;
Paul Jackson68860ec2005-10-30 15:02:36 -08001777 break;
1778 }
1779 default:
1780 BUG();
1781 break;
1782 }
1783}
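
/*
 * Remap example (illustrative): an MPOL_INTERLEAVE policy over nodes
 * {0,1} whose cpuset is moved from mems_allowed {0,1} to {2,3} is
 * rewritten by nodes_remap() so that node 0 maps to node 2 and node 1
 * to node 3, keeping the policy relative to the cpuset's new placement.
 */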

/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates the task's mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in the buffer (if positive)
 * or an error (negative).
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	int mode = pol ? pol->policy : MPOL_DEFAULT;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		get_zonemask(pol, &nodes);
		break;

	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
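
/*
 * Output example (illustrative): for an interleave policy over nodes
 * 0-3, mpol_to_str() writes "interleave=0-3" into the buffer; a bind
 * policy on nodes 1 and 3 comes out as "bind=1,3", and the default
 * policy is just "default" with no node list.
 */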

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end,
			     struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    struct numa_maps *md)
{
}
#endif

int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	/*
	 * unref shared or other task's mempolicy
	 */
	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_free(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
				&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
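
/*
 * Sample output (illustrative) of one /proc/<pid>/numa_maps line as
 * produced above:
 *
 *	2000000000 interleave=0-3 file=/lib/libc.so mapped=12 mapmax=4
 *	N0=3 N1=3 N2=3 N3=3
 *
 * i.e. start address, policy string, backing object, then the counters
 * and per-node page counts gathered by gather_stats().
 */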