/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave   Allocate memory interleaved over a set of nodes,
 *              with normal fallback if it fails.
 *              For VMA based allocations this interleaves based on the
 *              offset into the backing object or offset into the mapping
 *              for anonymous memory. For process policy a process counter
 *              is used.
 *
 * bind         Only allocate memory on a specific set of nodes,
 *              no fallback.
 *              FIXME: memory is allocated starting with the first node
 *              to the last. It would be better if bind would truly restrict
 *              the allocation to memory nodes instead
 *
 * preferred    Try a specific node first before normal fallback.
 *              As a special case node -1 here means do the allocation
 *              on the local CPU. This is normally identical to default,
 *              but useful to set in a VMA when you have a non default
 *              process policy.
 *
 * default      Allocate on the local node first, or when on a VMA
 *              use the process policy. This is what Linux always did
 *              in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

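/*
 * Illustrative userspace sketch (not built as part of this file): how the
 * policies above are typically requested through the set_mempolicy() and
 * mbind() system calls, e.g. via the wrappers declared in <numaif.h>
 * (libnuma).  The node numbers, addr and length below are examples only
 * and error checking is omitted:
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// interleave this process' future allocations over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	// bind an existing mapping [addr, addr+length) to node 0 only
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */
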
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask);

/* Do sanity checking on a policy */
static int mpol_check_policy(int mode, nodemask_t *nodes)
{
	int empty = nodes_empty(*nodes);

	switch (mode) {
	case MPOL_DEFAULT:
		if (!empty)
			return -EINVAL;
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		/* Preferred will only use the first bit, but allow
		   more for now. */
		if (empty)
			return -EINVAL;
		break;
	}
	return nodes_subset(*nodes, node_states[N_HIGH_MEMORY]) ? 0 : -EINVAL;
}

/* Generate a custom zonelist for the BIND policy. */
static struct zonelist *bind_zonelist(nodemask_t *nodes)
{
	struct zonelist *zl;
	int num, max, nd;
	enum zone_type k;

	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
	max++;			/* space for zlcache_ptr (see mmzone.h) */
	zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
	if (!zl)
		return ERR_PTR(-ENOMEM);
	zl->zlcache_ptr = NULL;
	num = 0;
	/* First put in the highest zones from all nodes, then all the next
	   lower zones etc. Avoid empty zones because the memory allocator
	   doesn't like them. If you implement node hot removal you
	   have to fix that. */
	k = MAX_NR_ZONES - 1;
	while (1) {
		for_each_node_mask(nd, *nodes) {
			struct zone *z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				zl->zones[num++] = z;
		}
		if (k == 0)
			break;
		k--;
	}
	if (num == 0) {
		kfree(zl);
		return ERR_PTR(-EINVAL);
	}
	zl->zones[num] = NULL;
	return zl;
}

/* Create a new policy */
static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d nodes[0] %lx\n",
		 mode, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	switch (mode) {
	case MPOL_INTERLEAVE:
		policy->v.nodes = *nodes;
		nodes_and(policy->v.nodes, policy->v.nodes,
			  node_states[N_HIGH_MEMORY]);
		if (nodes_weight(policy->v.nodes) == 0) {
			kmem_cache_free(policy_cache, policy);
			return ERR_PTR(-EINVAL);
		}
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(*nodes);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			policy->v.preferred_node = -1;
		break;
	case MPOL_BIND:
		policy->v.zonelist = bind_zonelist(nodes);
		if (IS_ERR(policy->v.zonelist)) {
			void *error_code = policy->v.zonelist;
			kmem_cache_free(policy_cache, policy);
			return error_code;
		}
		break;
	}
	policy->policy = mode;
	policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}

static int contextualize_policy(int mode, nodemask_t *nodes)
{
	if (!nodes)
		return 0;

	cpuset_update_task_memory_state();
	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
		return -EINVAL;
	return mpol_check_policy(mode, nodes);
}


/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(int mode, nodemask_t *nodes)
{
	struct mempolicy *new;

	if (contextualize_policy(mode, nodes))
		return -EINVAL;
	new = mpol_new(mode, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE)
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	int i;

	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_BIND:
		for (i = 0; p->v.zonelist->zones[i]; i++)
			node_set(zone_to_nid(p->v.zonelist->zones[i]),
				 *nodes);
		break;
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same. If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory source that same node.
 *
 * A single scan of tmp is sufficient. As we go, we remember the
 * most recent <s, d> pair that moved (s != d). If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning from_tmp, we at least have the
 * most recent <s, d> pair that moved. If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned long mode, nodemask_t *nmask,
		     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
				      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
	    || mode > MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	if (mpol_check_policy(mode, nmask))
		return -EINVAL;

	new = mpol_new(mode, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
		 mode, nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

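/*
 * Worked example for the mask copy below (a sketch, assuming
 * BITS_PER_LONG == 64): a caller passing maxnode = 6 describes node
 * bits 0-4.  get_nodes() decrements maxnode to 5, so nlongs = 1 and
 * endmask = (1UL << 5) - 1 = 0x1f; after the copy_from_user() the last
 * word is ANDed with 0x1f, silently dropping any higher bits.  Only
 * when the user supplies more words than MAX_NUMNODES needs are the
 * extra words required to be all zero (-EINVAL otherwise).
 */
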
/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
#ifdef CONFIG_CPUSETS
	/* Restrict the nodes to the allowed nodes in the cpuset */
	nodes_and(nodes, nodes, current->mems_allowed);
#endif
	return do_mbind(start, len, mode, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;

	if (mode < 0 || mode > MPOL_MAX)
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Returned policy has extra reference count if shared, vma,
 * or some other task's policy [show_numa_maps() can pass
 * @task != current].  It is the caller's responsibility to
 * free the reference in these cases.
 */
static struct mempolicy * get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;
	int shared_pol = 0;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
			shared_pol = 1;	/* if pol non-NULL, add ref below */
		} else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	else if (!shared_pol && pol != current->mempolicy)
		mpol_get(pol);	/* vma or other task's policy */
	return pol;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/* Lower zones don't get a policy applied */
		/* Careful: current->mems_allowed might have moved */
		if (gfp_zone(gfp) >= policy_zone)
			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
				return policy->v.zonelist;
		/*FALL THROUGH*/
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	int pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND:
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		return zone_to_nid(policy->v.zonelist->zones[0]);

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target = (unsigned)off % nnodes;
	int c;
	int nid = -1;

	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
 *
 * Returns a zonelist suitable for a huge page allocation.
 * If the effective policy is 'BIND, returns pointer to policy's zonelist.
 * If it is also a policy for which get_vma_policy() returns an extra
 * reference, we must hold that reference until after allocation.
 * In that case, return policy via @mpol so hugetlb allocation can drop
 * the reference.  For non-'BIND referenced policies, we can/do drop the
 * reference here, so the caller doesn't need to know about the special case
 * for default and current task policy.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol)
{
	struct mempolicy *pol = get_vma_policy(current, vma, addr);
	struct zonelist *zl;

	*mpol = NULL;		/* probably no unref needed */
	if (pol->policy == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
		__mpol_free(pol);		/* finished with pol */
		return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
	}

	zl = zonelist_policy(GFP_HIGHUSER, pol);
	if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
		if (pol->policy != MPOL_BIND)
			__mpol_free(pol);	/* finished with pol */
		else
			*mpol = pol;	/* unref needed after allocation */
	}
	return zl;
}
#endif

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zl->zones[0])
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

1318/**
1319 * alloc_page_vma - Allocate a page for a VMA.
1320 *
1321 * @gfp:
1322 * %GFP_USER user allocation.
1323 * %GFP_KERNEL kernel allocations,
1324 * %GFP_HIGHMEM highmem/user allocations,
1325 * %GFP_FS allocation should not call back into a file system.
1326 * %GFP_ATOMIC don't sleep.
1327 *
1328 * @vma: Pointer to VMA or NULL if not available.
1329 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1330 *
1331 * This function allocates a page from the kernel page pool and applies
1332 * a NUMA policy associated with the VMA or the current process.
1333 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1334 * mm_struct of the VMA to prevent it from going away. Should be used for
1335 * all allocations for pages that will be mapped into
1336 * user space. Returns NULL when no page can be allocated.
1337 *
1338 * Should be called with the mm_sem of the vma hold.
1339 */
1340struct page *
Al Virodd0fc662005-10-07 07:46:04 +01001341alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001343 struct mempolicy *pol = get_vma_policy(current, vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001344 struct zonelist *zl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08001346 cpuset_update_task_memory_state();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
1348 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1349 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001350
1351 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 return alloc_page_interleave(gfp, 0, nid);
1353 }
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001354 zl = zonelist_policy(gfp, pol);
1355 if (pol != &default_policy && pol != current->mempolicy) {
1356 /*
1357 * slow path: ref counted policy -- shared or vma
1358 */
1359 struct page *page = __alloc_pages(gfp, 0, zl);
1360 __mpol_free(pol);
1361 return page;
1362 }
1363 /*
1364 * fast path: default or task policy
1365 */
1366 return __alloc_pages(gfp, 0, zl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367}
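
/*
 * Example (illustrative sketch of a caller, not code from this file):
 * an anonymous-fault path allocates its user page through this helper
 * with mmap_sem already held for read, roughly:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER, vma, address);
 *	if (!page)
 *		goto oom;
 */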

/**
 * 	alloc_pages_current - Allocate pages.
 *
 *	@gfp:
 *	%GFP_USER     user allocation,
 *	%GFP_KERNEL   kernel allocation,
 *	%GFP_HIGHMEM  highmem allocation,
 *	%GFP_FS       don't call back into a file system.
 *	%GFP_ATOMIC   don't sleep.
 *	@order: Power of two of allocation size in pages. 0 is a single page.
 *
 *	Allocate a page from the kernel page pool.  When not in interrupt
 *	context, the current process' NUMA policy is applied.  Returns NULL
 *	when no page can be allocated.
 *
 *	Don't call cpuset_update_task_memory_state() unless
 *	1) it's ok to take cpuset_sem (can WAIT), and
 *	2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;

	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;
	if (pol->policy == MPOL_INTERLEAVE)
		return alloc_page_interleave(gfp, order, interleave_nodes(pol));
	return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
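
/*
 * Usage note: on CONFIG_NUMA kernels the generic alloc_pages(gfp, order)
 * helper resolves to this function, so e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * picks its node according to current->mempolicy.
 */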

/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after the task's cpuset moves.  See
 * also kernel/cpuset.c update_nodemask().
 */

/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(old, &mems);
	}
	*new = *old;
	atomic_set(&new->refcnt, 1);
	if (new->policy == MPOL_BIND) {
		int sz = ksize(old->v.zonelist);
		new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
		if (!new->v.zonelist) {
			kmem_cache_free(policy_cache, new);
			return ERR_PTR(-ENOMEM);
		}
	}
	return new;
}
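
/*
 * Caller sketch (illustrative, after the dup_mmap() pattern in
 * kernel/fork.c): duplicating an address space copies each vma's policy
 * through the mpol_copy() wrapper, which only enters this slow path for
 * a non-NULL policy:
 *
 *	pol = mpol_copy(vma_policy(old_vma));
 *	if (IS_ERR(pol))
 *		... unwind with PTR_ERR(pol) ...
 *	vma_set_policy(new_vma, pol);
 */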

/* Slow path of a mempolicy comparison */
int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return 0;
	if (a->policy != b->policy)
		return 0;
	switch (a->policy) {
	case MPOL_DEFAULT:
		return 1;
	case MPOL_INTERLEAVE:
		return nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	case MPOL_BIND: {
		int i;
		for (i = 0; a->v.zonelist->zones[i]; i++)
			if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
				return 0;
		return b->v.zonelist->zones[i] == NULL;
	}
	default:
		BUG();
		return 0;
	}
}

/* Slow path of an mpol destructor. */
void __mpol_free(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	if (p->policy == MPOL_BIND)
		kfree(p->v.zonelist);
	p->policy = MPOL_DEFAULT;
	kmem_cache_free(policy_cache, p);
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
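
/*
 * Worked example (hypothetical tree): with nodes covering [0,5), [5,10)
 * and [12,16), sp_lookup(sp, 8, 14) first descends to some intersecting
 * node, then walks rb_prev() until the predecessor no longer overlaps,
 * so it returns the [5,10) node -- the *first* element intersecting
 * [8,14).
 */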

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->policy : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_free(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_free(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}
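
/*
 * Worked example (hypothetical): if the tree holds one node A over
 * [0,20) and we replace [5,10) with policy B, the old node is split:
 * A is truncated to [0,5), a new2 node carries A's policy over [10,20),
 * and the new [5,10) node B is inserted, leaving
 * [0,5)=A, [5,10)=B, [10,20)=A.
 */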

void mpol_shared_policy_init(struct shared_policy *info, int policy,
				nodemask_t *policy_nodes)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);

	if (policy != MPOL_DEFAULT) {
		struct mempolicy *newpol;

		/* Falls back to MPOL_DEFAULT on any error */
		newpol = mpol_new(policy, policy_nodes);
		if (!IS_ERR(newpol)) {
			/* Create pseudo-vma that contains just the policy */
			struct vm_area_struct pvma;

			memset(&pvma, 0, sizeof(struct vm_area_struct));
			/* Policy covers entire file */
			pvma.vm_end = TASK_SIZE;
			mpol_set_shared_policy(info, &pvma, newpol);
			mpol_free(newpol);
		}
	}
}
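
/*
 * Usage sketch (illustrative, tmpfs-style; info and sbinfo stand for a
 * filesystem's own inode/superblock data and are assumptions here): a
 * filesystem that wants per-file policies calls this while setting up
 * an inode, e.g.
 *
 *	mpol_shared_policy_init(&info->policy, sbinfo->policy,
 *				&sbinfo->policy_nodes);
 *
 * so a mount-time policy becomes the default for every page of the file.
 */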

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_free(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init.  Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB);
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}
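
/*
 * Worked example of the threshold above: with 4KB pages
 * (PAGE_SHIFT == 12), a node qualifies for boot-time interleaving once
 * total_pages << 12 >= 16 << 20, i.e. at 4096 present pages (16MB); an
 * 8MB node (2048 pages) would instead only be kept as the "prefer"
 * fallback.
 */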

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, NULL);
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	nodemask_t *mpolmask;
	nodemask_t tmp;

	if (!pol)
		return;
	mpolmask = &pol->cpuset_mems_allowed;
	if (nodes_equal(*mpolmask, *newmask))
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
		pol->v.nodes = tmp;
		*mpolmask = *newmask;
		current->il_next = node_remap(current->il_next,
						*mpolmask, *newmask);
		break;
	case MPOL_PREFERRED:
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						*mpolmask, *newmask);
		*mpolmask = *newmask;
		break;
	case MPOL_BIND: {
		nodemask_t nodes;
		struct zone **z;
		struct zonelist *zonelist;

		nodes_clear(nodes);
		for (z = pol->v.zonelist->zones; *z; z++)
			node_set(zone_to_nid(*z), nodes);
		nodes_remap(tmp, nodes, *mpolmask, *newmask);
		nodes = tmp;

		zonelist = bind_zonelist(&nodes);

		/* If bind_zonelist() fails (no memory), we keep the old
		 * zonelist.  If that old zonelist has no remaining
		 * mems_allowed nodes, then zonelist_policy() will
		 * "FALL THROUGH" to MPOL_DEFAULT.
		 */

		if (!IS_ERR(zonelist)) {
			/* Good - got mem - substitute new zonelist */
			kfree(pol->v.zonelist);
			pol->v.zonelist = zonelist;
		}
		*mpolmask = *newmask;
		break;
	}
	default:
		BUG();
		break;
	}
}
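
/*
 * Worked example (hypothetical masks): an MPOL_INTERLEAVE policy built
 * while the cpuset allowed {0,1} records cpuset_mems_allowed == {0,1}.
 * If the cpuset is later moved to {2,3}, nodes_remap() translates by
 * relative position, so the interleave mask {0,1} becomes {2,3}, and a
 * preferred_node of 1 would become 3.
 */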

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and updates the task's mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char * const policy_types[] =
	{ "default", "prefer", "bind", "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative).
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	int mode = pol ? pol->policy : MPOL_DEFAULT;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		get_zonemask(pol, &nodes);
		break;

	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
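
/*
 * Example output (illustrative): an interleave policy over nodes 0-3
 * renders as "interleave=0-3", a preferred policy for node 1 as
 * "prefer=1", and no policy at all as plain "default".
 */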

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end,
			     struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    struct numa_maps *md)
{
}
#endif

int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	/*
	 * unref shared or other task's mempolicy
	 */
	if (pol != &default_policy && pol != current->mempolicy)
		__mpol_free(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
		   vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
				&node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
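
/*
 * Example /proc/<pid>/numa_maps line (illustrative values):
 *
 *	2aaaaac00000 interleave=0-3 file=/mnt/shm/test mapped=4 mapmax=2 N0=2 N1=2
 *
 * i.e. start address, the policy string from mpol_to_str(), an optional
 * file/heap/stack tag, then the per-node counts gathered above.
 */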