/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about the node(s) from which
 * memory should be allocated.
 *
 * Four policies are supported, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA-aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * The same holds for GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/
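
/*
 * Illustrative userspace usage of the interface implemented below -- a
 * hedged sketch, not part of the kernel build; it assumes the
 * set_mempolicy(2) and mbind(2) wrappers declared in <numaif.h> (libnuma):
 *
 *	unsigned long interleave_mask = 0x3;	// nodes 0 and 1
 *	unsigned long bind_mask = 0x1;		// node 0 only
 *
 *	// Process-wide policy: interleave new allocations across nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &interleave_mask, 8 * sizeof(long));
 *
 *	// Per-VMA policy: restrict one anonymous mapping to node 0 and
 *	// report an error (MPOL_MF_STRICT) if existing pages violate it.
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, 1 << 20, MPOL_BIND, &bind_mask, 8 * sizeof(long),
 *	      MPOL_MF_STRICT);
 */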
68
69#include <linux/mempolicy.h>
70#include <linux/mm.h>
71#include <linux/highmem.h>
72#include <linux/hugetlb.h>
73#include <linux/kernel.h>
74#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070075#include <linux/nodemask.h>
76#include <linux/cpuset.h>
77#include <linux/gfp.h>
78#include <linux/slab.h>
79#include <linux/string.h>
80#include <linux/module.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070081#include <linux/nsproxy.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070082#include <linux/interrupt.h>
83#include <linux/init.h>
84#include <linux/compat.h>
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -080085#include <linux/swap.h>
Christoph Lameter1a75a6c2006-01-08 01:01:02 -080086#include <linux/seq_file.h>
87#include <linux/proc_fs.h>
Christoph Lameterb20a3502006-03-22 00:09:12 -080088#include <linux/migrate.h>
Christoph Lameter95a402c2006-06-23 02:03:53 -070089#include <linux/rmap.h>
David Quigley86c3a762006-06-23 02:04:02 -070090#include <linux/security.h>
Adrian Bunkdbcb0f12007-10-16 01:26:26 -070091#include <linux/syscalls.h>
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -080092
Linus Torvalds1da177e2005-04-16 15:20:36 -070093#include <asm/tlbflush.h>
94#include <asm/uaccess.h>
95
/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
/* Highest zone. A specific allocation for a zone below that is not
   policied. */
Christoph Lameter62672762007-02-10 01:43:07 -0800106enum zone_type policy_zone = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
Andi Kleend42c6992005-07-06 19:56:03 +0200108struct mempolicy default_policy = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 .refcnt = ATOMIC_INIT(1), /* never free it */
110 .policy = MPOL_DEFAULT,
111};
112
Adrian Bunkdbcb0f12007-10-16 01:26:26 -0700113static void mpol_rebind_policy(struct mempolicy *pol,
114 const nodemask_t *newmask);
115
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116/* Do sanity checking on a policy */
David Rientjesa3b51e02008-04-28 02:12:23 -0700117static int mpol_check_policy(unsigned short mode, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118{
KOSAKI Motohiro31f1de42008-02-12 13:30:22 +0900119 int was_empty, is_empty;
120
121 if (!nodes)
122 return 0;
123
	/*
	 * "Contextualize" the incoming nodemask for cpusets:
	 * Remember whether the incoming nodemask was empty.  If not,
	 * restrict the nodes to the allowed nodes in the cpuset.
	 * This is guaranteed to be a subset of nodes with memory.
	 */
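	/*
	 * e.g. an incoming nodemask of {0,1,2} in a cpuset restricted to
	 * nodes {1,2} is reduced to {1,2}; an incoming mask of {3} in that
	 * same cpuset becomes empty and is rejected by the mode checks below.
	 */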
130 cpuset_update_task_memory_state();
131 is_empty = was_empty = nodes_empty(*nodes);
132 if (!was_empty) {
133 nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
134 is_empty = nodes_empty(*nodes); /* after "contextualization" */
135 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136
137 switch (mode) {
138 case MPOL_DEFAULT:
KOSAKI Motohiro31f1de42008-02-12 13:30:22 +0900139 /*
140 * require caller to specify an empty nodemask
141 * before "contextualization"
142 */
143 if (!was_empty)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144 return -EINVAL;
145 break;
146 case MPOL_BIND:
147 case MPOL_INTERLEAVE:
KOSAKI Motohiro31f1de42008-02-12 13:30:22 +0900148 /*
149 * require at least 1 valid node after "contextualization"
150 */
151 if (is_empty)
152 return -EINVAL;
153 break;
154 case MPOL_PREFERRED:
155 /*
156 * Did caller specify invalid nodes?
157 * Don't silently accept this as "local allocation".
158 */
159 if (!was_empty && is_empty)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160 return -EINVAL;
161 break;
David Rientjesa3b51e02008-04-28 02:12:23 -0700162 default:
163 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164 }
KOSAKI Motohiro31f1de42008-02-12 13:30:22 +0900165 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166}
Andi Kleendd942ae2006-02-17 01:39:16 +0100167
Mel Gorman19770b32008-04-28 02:12:18 -0700168/* Check that the nodemask contains at least one populated zone */
169static int is_valid_nodemask(nodemask_t *nodemask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170{
Mel Gorman19770b32008-04-28 02:12:18 -0700171 int nd, k;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172
Mel Gorman19770b32008-04-28 02:12:18 -0700173 /* Check that there is something useful in this mask */
174 k = policy_zone;
175
176 for_each_node_mask(nd, *nodemask) {
177 struct zone *z;
178
179 for (k = 0; k <= policy_zone; k++) {
180 z = &NODE_DATA(nd)->node_zones[k];
181 if (z->present_pages > 0)
182 return 1;
Andi Kleendd942ae2006-02-17 01:39:16 +0100183 }
184 }
Mel Gorman19770b32008-04-28 02:12:18 -0700185
186 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187}
188
189/* Create a new policy */
David Rientjes028fec42008-04-28 02:12:25 -0700190static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
191 nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192{
193 struct mempolicy *policy;
194
David Rientjes028fec42008-04-28 02:12:25 -0700195 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
196 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
Paul Mundt140d5a42007-07-15 23:38:16 -0700197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 if (mode == MPOL_DEFAULT)
199 return NULL;
200 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
201 if (!policy)
202 return ERR_PTR(-ENOMEM);
203 atomic_set(&policy->refcnt, 1);
204 switch (mode) {
205 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700206 policy->v.nodes = *nodes;
Christoph Lameter6eaf8062007-10-16 01:25:30 -0700207 if (nodes_weight(policy->v.nodes) == 0) {
Andi Kleen8f493d72006-01-03 00:07:28 +0100208 kmem_cache_free(policy_cache, policy);
209 return ERR_PTR(-EINVAL);
210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211 break;
212 case MPOL_PREFERRED:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700213 policy->v.preferred_node = first_node(*nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 if (policy->v.preferred_node >= MAX_NUMNODES)
215 policy->v.preferred_node = -1;
216 break;
217 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -0700218 if (!is_valid_nodemask(nodes)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 kmem_cache_free(policy_cache, policy);
Mel Gorman19770b32008-04-28 02:12:18 -0700220 return ERR_PTR(-EINVAL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 }
Mel Gorman19770b32008-04-28 02:12:18 -0700222 policy->v.nodes = *nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223 break;
David Rientjesa3b51e02008-04-28 02:12:23 -0700224 default:
225 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226 }
227 policy->policy = mode;
David Rientjes028fec42008-04-28 02:12:25 -0700228 policy->flags = flags;
Paul Jackson74cb2152006-01-08 01:01:56 -0800229 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 return policy;
231}
232
Christoph Lameter397874d2006-03-06 15:42:53 -0800233static void gather_stats(struct page *, void *, int pte_dirty);
Christoph Lameterfc301282006-01-18 17:42:29 -0800234static void migrate_page_add(struct page *page, struct list_head *pagelist,
235 unsigned long flags);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -0800236
Christoph Lameter38e35862006-01-08 01:01:01 -0800237/* Scan through pages checking if pages follow certain conditions. */
Nick Pigginb5810032005-10-29 18:16:12 -0700238static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800239 unsigned long addr, unsigned long end,
240 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800241 void *private)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242{
Hugh Dickins91612e02005-06-21 17:15:07 -0700243 pte_t *orig_pte;
244 pte_t *pte;
Hugh Dickins705e87c2005-10-29 18:16:27 -0700245 spinlock_t *ptl;
Hugh Dickins941150a2005-06-21 17:15:06 -0700246
Hugh Dickins705e87c2005-10-29 18:16:27 -0700247 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
Hugh Dickins91612e02005-06-21 17:15:07 -0700248 do {
Linus Torvalds6aab3412005-11-28 14:34:23 -0800249 struct page *page;
Andy Whitcroft25ba77c2006-12-06 20:33:03 -0800250 int nid;
Hugh Dickins91612e02005-06-21 17:15:07 -0700251
252 if (!pte_present(*pte))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 continue;
Linus Torvalds6aab3412005-11-28 14:34:23 -0800254 page = vm_normal_page(vma, addr, *pte);
255 if (!page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256 continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g. the
		 * location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
Christoph Lameterf4598c82006-01-12 01:05:20 -0800268 if (PageReserved(page))
269 continue;
Linus Torvalds6aab3412005-11-28 14:34:23 -0800270 nid = page_to_nid(page);
Christoph Lameter38e35862006-01-08 01:01:01 -0800271 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
272 continue;
273
Christoph Lameter1a75a6c2006-01-08 01:01:02 -0800274 if (flags & MPOL_MF_STATS)
Christoph Lameter397874d2006-03-06 15:42:53 -0800275 gather_stats(page, private, pte_dirty(*pte));
Nick Piggin053837f2006-01-18 17:42:27 -0800276 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
Christoph Lameterfc301282006-01-18 17:42:29 -0800277 migrate_page_add(page, private, flags);
Christoph Lameter38e35862006-01-08 01:01:01 -0800278 else
279 break;
Hugh Dickins91612e02005-06-21 17:15:07 -0700280 } while (pte++, addr += PAGE_SIZE, addr != end);
Hugh Dickins705e87c2005-10-29 18:16:27 -0700281 pte_unmap_unlock(orig_pte, ptl);
Hugh Dickins91612e02005-06-21 17:15:07 -0700282 return addr != end;
283}
284
Nick Pigginb5810032005-10-29 18:16:12 -0700285static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800286 unsigned long addr, unsigned long end,
287 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800288 void *private)
Hugh Dickins91612e02005-06-21 17:15:07 -0700289{
290 pmd_t *pmd;
291 unsigned long next;
292
293 pmd = pmd_offset(pud, addr);
294 do {
295 next = pmd_addr_end(addr, end);
296 if (pmd_none_or_clear_bad(pmd))
297 continue;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800298 if (check_pte_range(vma, pmd, addr, next, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800299 flags, private))
Hugh Dickins91612e02005-06-21 17:15:07 -0700300 return -EIO;
301 } while (pmd++, addr = next, addr != end);
302 return 0;
303}
304
Nick Pigginb5810032005-10-29 18:16:12 -0700305static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800306 unsigned long addr, unsigned long end,
307 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800308 void *private)
Hugh Dickins91612e02005-06-21 17:15:07 -0700309{
310 pud_t *pud;
311 unsigned long next;
312
313 pud = pud_offset(pgd, addr);
314 do {
315 next = pud_addr_end(addr, end);
316 if (pud_none_or_clear_bad(pud))
317 continue;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800318 if (check_pmd_range(vma, pud, addr, next, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800319 flags, private))
Hugh Dickins91612e02005-06-21 17:15:07 -0700320 return -EIO;
321 } while (pud++, addr = next, addr != end);
322 return 0;
323}
324
Nick Pigginb5810032005-10-29 18:16:12 -0700325static inline int check_pgd_range(struct vm_area_struct *vma,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800326 unsigned long addr, unsigned long end,
327 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800328 void *private)
Hugh Dickins91612e02005-06-21 17:15:07 -0700329{
330 pgd_t *pgd;
331 unsigned long next;
332
Nick Pigginb5810032005-10-29 18:16:12 -0700333 pgd = pgd_offset(vma->vm_mm, addr);
Hugh Dickins91612e02005-06-21 17:15:07 -0700334 do {
335 next = pgd_addr_end(addr, end);
336 if (pgd_none_or_clear_bad(pgd))
337 continue;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800338 if (check_pud_range(vma, pgd, addr, next, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800339 flags, private))
Hugh Dickins91612e02005-06-21 17:15:07 -0700340 return -EIO;
341 } while (pgd++, addr = next, addr != end);
342 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343}
344
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800345/*
346 * Check if all pages in a range are on a set of nodes.
347 * If pagelist != NULL then isolate pages from the LRU and
348 * put them on the pagelist.
349 */
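/*
 * Returns the first vma covering or following @start on success, or an
 * ERR_PTR() value on error.
 */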
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350static struct vm_area_struct *
351check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
Christoph Lameter38e35862006-01-08 01:01:01 -0800352 const nodemask_t *nodes, unsigned long flags, void *private)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353{
354 int err;
355 struct vm_area_struct *first, *vma, *prev;
356
Christoph Lameter90036ee2006-03-16 23:03:59 -0800357 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
Christoph Lameter90036ee2006-03-16 23:03:59 -0800358
Christoph Lameterb20a3502006-03-22 00:09:12 -0800359 err = migrate_prep();
360 if (err)
361 return ERR_PTR(err);
Christoph Lameter90036ee2006-03-16 23:03:59 -0800362 }
Nick Piggin053837f2006-01-18 17:42:27 -0800363
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364 first = find_vma(mm, start);
365 if (!first)
366 return ERR_PTR(-EFAULT);
367 prev = NULL;
368 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800369 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
370 if (!vma->vm_next && vma->vm_end < end)
371 return ERR_PTR(-EFAULT);
372 if (prev && prev->vm_end < vma->vm_start)
373 return ERR_PTR(-EFAULT);
374 }
375 if (!is_vm_hugetlb_page(vma) &&
376 ((flags & MPOL_MF_STRICT) ||
377 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
378 vma_migratable(vma)))) {
Andi Kleen5b952b32005-09-13 01:25:08 -0700379 unsigned long endvma = vma->vm_end;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800380
Andi Kleen5b952b32005-09-13 01:25:08 -0700381 if (endvma > end)
382 endvma = end;
383 if (vma->vm_start > start)
384 start = vma->vm_start;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800385 err = check_pgd_range(vma, start, endvma, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800386 flags, private);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387 if (err) {
388 first = ERR_PTR(err);
389 break;
390 }
391 }
392 prev = vma;
393 }
394 return first;
395}
396
397/* Apply policy to a single VMA */
398static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
399{
400 int err = 0;
401 struct mempolicy *old = vma->vm_policy;
402
Paul Mundt140d5a42007-07-15 23:38:16 -0700403 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 vma->vm_start, vma->vm_end, vma->vm_pgoff,
405 vma->vm_ops, vma->vm_file,
406 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
407
408 if (vma->vm_ops && vma->vm_ops->set_policy)
409 err = vma->vm_ops->set_policy(vma, new);
410 if (!err) {
411 mpol_get(new);
412 vma->vm_policy = new;
413 mpol_free(old);
414 }
415 return err;
416}
417
418/* Step 2: apply policy to a range and do splits. */
419static int mbind_range(struct vm_area_struct *vma, unsigned long start,
420 unsigned long end, struct mempolicy *new)
421{
422 struct vm_area_struct *next;
423 int err;
424
425 err = 0;
426 for (; vma && vma->vm_start < end; vma = next) {
427 next = vma->vm_next;
428 if (vma->vm_start < start)
429 err = split_vma(vma->vm_mm, vma, start, 1);
430 if (!err && vma->vm_end > end)
431 err = split_vma(vma->vm_mm, vma, end, 0);
432 if (!err)
433 err = policy_vma(vma, new);
434 if (err)
435 break;
436 }
437 return err;
438}
439
Paul Jacksonc61afb12006-03-24 03:16:08 -0800440/*
441 * Update task->flags PF_MEMPOLICY bit: set iff non-default
442 * mempolicy. Allows more rapid checking of this (combined perhaps
443 * with other PF_* flag bits) on memory allocation hot code paths.
444 *
445 * If called from outside this file, the task 'p' should -only- be
446 * a newly forked child not yet visible on the task list, because
447 * manipulating the task flags of a visible task is not safe.
448 *
449 * The above limitation is why this routine has the funny name
450 * mpol_fix_fork_child_flag().
451 *
452 * It is also safe to call this with a task pointer of current,
453 * which the static wrapper mpol_set_task_struct_flag() does,
454 * for use within this file.
455 */
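/*
 * In practice the caller outside this file is expected to be the fork path
 * (copy_process()), which runs this on the new child before the child is
 * linked into the task list.
 */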
456
457void mpol_fix_fork_child_flag(struct task_struct *p)
458{
459 if (p->mempolicy)
460 p->flags |= PF_MEMPOLICY;
461 else
462 p->flags &= ~PF_MEMPOLICY;
463}
464
465static void mpol_set_task_struct_flag(void)
466{
467 mpol_fix_fork_child_flag(current);
468}
469
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470/* Set the process memory policy */
David Rientjes028fec42008-04-28 02:12:25 -0700471static long do_set_mempolicy(unsigned short mode, unsigned short flags,
472 nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700474 struct mempolicy *new;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700475
KOSAKI Motohiro31f1de42008-02-12 13:30:22 +0900476 if (mpol_check_policy(mode, nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477 return -EINVAL;
David Rientjes028fec42008-04-28 02:12:25 -0700478 new = mpol_new(mode, flags, nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 if (IS_ERR(new))
480 return PTR_ERR(new);
481 mpol_free(current->mempolicy);
482 current->mempolicy = new;
Paul Jacksonc61afb12006-03-24 03:16:08 -0800483 mpol_set_task_struct_flag();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 if (new && new->policy == MPOL_INTERLEAVE)
Andi Kleendfcd3c02005-10-29 18:15:48 -0700485 current->il_next = first_node(new->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486 return 0;
487}
488
489/* Fill a zone bitmap for a policy */
Andi Kleendfcd3c02005-10-29 18:15:48 -0700490static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491{
Andi Kleendfcd3c02005-10-29 18:15:48 -0700492 nodes_clear(*nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 switch (p->policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 case MPOL_DEFAULT:
495 break;
Mel Gorman19770b32008-04-28 02:12:18 -0700496 case MPOL_BIND:
497 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700499 *nodes = p->v.nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 break;
501 case MPOL_PREFERRED:
Christoph Lameter56bbd652007-10-16 01:25:35 -0700502 /* or use current node instead of memory_map? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 if (p->v.preferred_node < 0)
Christoph Lameter56bbd652007-10-16 01:25:35 -0700504 *nodes = node_states[N_HIGH_MEMORY];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 else
Andi Kleendfcd3c02005-10-29 18:15:48 -0700506 node_set(p->v.preferred_node, *nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 break;
508 default:
509 BUG();
510 }
511}
512
513static int lookup_node(struct mm_struct *mm, unsigned long addr)
514{
515 struct page *p;
516 int err;
517
518 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
519 if (err >= 0) {
520 err = page_to_nid(p);
521 put_page(p);
522 }
523 return err;
524}
525
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526/* Retrieve NUMA policy */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -0700527static long do_get_mempolicy(int *policy, nodemask_t *nmask,
528 unsigned long addr, unsigned long flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529{
Christoph Lameter8bccd852005-10-29 18:16:59 -0700530 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 struct mm_struct *mm = current->mm;
532 struct vm_area_struct *vma = NULL;
533 struct mempolicy *pol = current->mempolicy;
534
Paul Jacksoncf2a4732006-01-08 01:01:54 -0800535 cpuset_update_task_memory_state();
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700536 if (flags &
537 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538 return -EINVAL;
Lee Schermerhorn754af6f2007-10-16 01:24:51 -0700539
540 if (flags & MPOL_F_MEMS_ALLOWED) {
541 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
542 return -EINVAL;
543 *policy = 0; /* just so it's initialized */
544 *nmask = cpuset_current_mems_allowed;
545 return 0;
546 }
547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 if (flags & MPOL_F_ADDR) {
549 down_read(&mm->mmap_sem);
550 vma = find_vma_intersection(mm, addr, addr+1);
551 if (!vma) {
552 up_read(&mm->mmap_sem);
553 return -EFAULT;
554 }
555 if (vma->vm_ops && vma->vm_ops->get_policy)
556 pol = vma->vm_ops->get_policy(vma, addr);
557 else
558 pol = vma->vm_policy;
559 } else if (addr)
560 return -EINVAL;
561
562 if (!pol)
563 pol = &default_policy;
564
565 if (flags & MPOL_F_NODE) {
566 if (flags & MPOL_F_ADDR) {
567 err = lookup_node(mm, addr);
568 if (err < 0)
569 goto out;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700570 *policy = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571 } else if (pol == current->mempolicy &&
572 pol->policy == MPOL_INTERLEAVE) {
Christoph Lameter8bccd852005-10-29 18:16:59 -0700573 *policy = current->il_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700574 } else {
575 err = -EINVAL;
576 goto out;
577 }
578 } else
David Rientjes028fec42008-04-28 02:12:25 -0700579 *policy = pol->policy | pol->flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580
581 if (vma) {
582 up_read(&current->mm->mmap_sem);
583 vma = NULL;
584 }
585
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586 err = 0;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700587 if (nmask)
588 get_zonemask(pol, nmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589
590 out:
591 if (vma)
592 up_read(&current->mm->mmap_sem);
593 return err;
594}
595
Christoph Lameterb20a3502006-03-22 00:09:12 -0800596#ifdef CONFIG_MIGRATION
Christoph Lameter8bccd852005-10-29 18:16:59 -0700597/*
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800598 * page migration
599 */
Christoph Lameterfc301282006-01-18 17:42:29 -0800600static void migrate_page_add(struct page *page, struct list_head *pagelist,
601 unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800602{
603 /*
Christoph Lameterfc301282006-01-18 17:42:29 -0800604 * Avoid migrating a page that is shared with others.
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800605 */
Christoph Lameterb20a3502006-03-22 00:09:12 -0800606 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
607 isolate_lru_page(page, pagelist);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800608}
609
Christoph Lameter742755a2006-06-23 02:03:55 -0700610static struct page *new_node_page(struct page *page, unsigned long node, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -0700611{
Mel Gorman769848c2007-07-17 04:03:05 -0700612 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
Christoph Lameter95a402c2006-06-23 02:03:53 -0700613}
614
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800615/*
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800616 * Migrate pages from one node to a target node.
617 * Returns error or the number of pages not migrated.
618 */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -0700619static int migrate_to_node(struct mm_struct *mm, int source, int dest,
620 int flags)
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800621{
622 nodemask_t nmask;
623 LIST_HEAD(pagelist);
624 int err = 0;
625
626 nodes_clear(nmask);
627 node_set(source, nmask);
628
629 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
630 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
631
Christoph Lameteraaa994b2006-06-23 02:03:52 -0700632 if (!list_empty(&pagelist))
Christoph Lameter95a402c2006-06-23 02:03:53 -0700633 err = migrate_pages(&pagelist, new_node_page, dest);
634
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800635 return err;
636}
637
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
644int do_migrate_pages(struct mm_struct *mm,
645 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
646{
647 LIST_HEAD(pagelist);
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800648 int busy = 0;
649 int err = 0;
650 nodemask_t tmp;
Christoph Lameter39743882006-01-08 01:00:51 -0800651
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800652 down_read(&mm->mmap_sem);
Christoph Lameter39743882006-01-08 01:00:51 -0800653
Christoph Lameter7b2259b2006-06-25 05:46:48 -0700654 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
655 if (err)
656 goto out;
657
/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory off that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
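/*
 * Illustrative trace (not from the source): from = {0,1}, to = {1,2}.
 * The first pass over tmp = {0,1} finds s=0 -> d=1, but node 1 is still
 * set in tmp, so <0,1> is only remembered; s=1 -> d=2 is not in tmp, so
 * 1 -> 2 is migrated first and bit 1 is cleared.  The second pass over
 * tmp = {0} then migrates 0 -> 1, so each destination is drained before
 * other pages are moved onto it whenever the masks allow.
 */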
Christoph Lameterd4984712006-01-08 01:00:55 -0800688
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800689 tmp = *from_nodes;
690 while (!nodes_empty(tmp)) {
691 int s,d;
692 int source = -1;
693 int dest = 0;
694
695 for_each_node_mask(s, tmp) {
696 d = node_remap(s, *from_nodes, *to_nodes);
697 if (s == d)
698 continue;
699
700 source = s; /* Node moved. Memorize */
701 dest = d;
702
703 /* dest not in remaining from nodes? */
704 if (!node_isset(dest, tmp))
705 break;
706 }
707 if (source == -1)
708 break;
709
710 node_clear(source, tmp);
711 err = migrate_to_node(mm, source, dest, flags);
712 if (err > 0)
713 busy += err;
714 if (err < 0)
715 break;
Christoph Lameter39743882006-01-08 01:00:51 -0800716 }
Christoph Lameter7b2259b2006-06-25 05:46:48 -0700717out:
Christoph Lameter39743882006-01-08 01:00:51 -0800718 up_read(&mm->mmap_sem);
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800719 if (err < 0)
720 return err;
721 return busy;
Christoph Lameterb20a3502006-03-22 00:09:12 -0800722
Christoph Lameter39743882006-01-08 01:00:51 -0800723}
724
Lee Schermerhorn3ad33b22007-11-14 16:59:10 -0800725/*
726 * Allocate a new page for page migration based on vma policy.
727 * Start assuming that page is mapped by vma pointed to by @private.
728 * Search forward from there, if not. N.B., this assumes that the
729 * list of pages handed to migrate_pages()--which is how we get here--
730 * is in virtual address order.
731 */
Christoph Lameter742755a2006-06-23 02:03:55 -0700732static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -0700733{
734 struct vm_area_struct *vma = (struct vm_area_struct *)private;
Lee Schermerhorn3ad33b22007-11-14 16:59:10 -0800735 unsigned long uninitialized_var(address);
Christoph Lameter95a402c2006-06-23 02:03:53 -0700736
Lee Schermerhorn3ad33b22007-11-14 16:59:10 -0800737 while (vma) {
738 address = page_address_in_vma(page, vma);
739 if (address != -EFAULT)
740 break;
741 vma = vma->vm_next;
742 }
743
744 /*
745 * if !vma, alloc_page_vma() will use task or system default policy
746 */
747 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
Christoph Lameter95a402c2006-06-23 02:03:53 -0700748}
Christoph Lameterb20a3502006-03-22 00:09:12 -0800749#else
750
751static void migrate_page_add(struct page *page, struct list_head *pagelist,
752 unsigned long flags)
753{
754}
755
756int do_migrate_pages(struct mm_struct *mm,
757 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
758{
759 return -ENOSYS;
760}
Christoph Lameter95a402c2006-06-23 02:03:53 -0700761
Keith Owens69939742006-10-11 01:21:28 -0700762static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -0700763{
764 return NULL;
765}
Christoph Lameterb20a3502006-03-22 00:09:12 -0800766#endif
767
Adrian Bunkdbcb0f12007-10-16 01:26:26 -0700768static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -0700769 unsigned short mode, unsigned short mode_flags,
770 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800771{
772 struct vm_area_struct *vma;
773 struct mm_struct *mm = current->mm;
774 struct mempolicy *new;
775 unsigned long end;
776 int err;
777 LIST_HEAD(pagelist);
778
David Rientjesa3b51e02008-04-28 02:12:23 -0700779 if (flags & ~(unsigned long)(MPOL_MF_STRICT |
780 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800781 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -0800782 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800783 return -EPERM;
784
785 if (start & ~PAGE_MASK)
786 return -EINVAL;
787
788 if (mode == MPOL_DEFAULT)
789 flags &= ~MPOL_MF_STRICT;
790
791 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
792 end = start + len;
793
794 if (end < start)
795 return -EINVAL;
796 if (end == start)
797 return 0;
798
799 if (mpol_check_policy(mode, nmask))
800 return -EINVAL;
801
David Rientjes028fec42008-04-28 02:12:25 -0700802 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800803 if (IS_ERR(new))
804 return PTR_ERR(new);
805
806 /*
807 * If we are using the default policy then operation
808 * on discontinuous address spaces is okay after all
809 */
810 if (!new)
811 flags |= MPOL_MF_DISCONTIG_OK;
812
David Rientjes028fec42008-04-28 02:12:25 -0700813 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
814 start, start + len, mode, mode_flags,
815 nmask ? nodes_addr(*nmask)[0] : -1);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800816
817 down_write(&mm->mmap_sem);
818 vma = check_range(mm, start, end, nmask,
819 flags | MPOL_MF_INVERT, &pagelist);
820
821 err = PTR_ERR(vma);
822 if (!IS_ERR(vma)) {
823 int nr_failed = 0;
824
825 err = mbind_range(vma, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800826
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800827 if (!list_empty(&pagelist))
Christoph Lameter95a402c2006-06-23 02:03:53 -0700828 nr_failed = migrate_pages(&pagelist, new_vma_page,
829 (unsigned long)vma);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800830
831 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
832 err = -EIO;
833 }
Christoph Lameterb20a3502006-03-22 00:09:12 -0800834
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800835 up_write(&mm->mmap_sem);
836 mpol_free(new);
837 return err;
838}
839
Christoph Lameter39743882006-01-08 01:00:51 -0800840/*
Christoph Lameter8bccd852005-10-29 18:16:59 -0700841 * User space interface with variable sized bitmaps for nodelists.
842 */
843
844/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -0800845static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -0700846 unsigned long maxnode)
847{
848 unsigned long k;
849 unsigned long nlongs;
850 unsigned long endmask;
851
852 --maxnode;
853 nodes_clear(*nodes);
854 if (maxnode == 0 || !nmask)
855 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -0800856 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -0800857 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700858
859 nlongs = BITS_TO_LONGS(maxnode);
860 if ((maxnode % BITS_PER_LONG) == 0)
861 endmask = ~0UL;
862 else
863 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
864
	/* When the user specifies more nodes than supported, just check
	   that the unsupported part is all zero. */
867 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
868 if (nlongs > PAGE_SIZE/sizeof(long))
869 return -EINVAL;
870 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
871 unsigned long t;
872 if (get_user(t, nmask + k))
873 return -EFAULT;
874 if (k == nlongs - 1) {
875 if (t & endmask)
876 return -EINVAL;
877 } else if (t)
878 return -EINVAL;
879 }
880 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
881 endmask = ~0UL;
882 }
883
884 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
885 return -EFAULT;
886 nodes_addr(*nodes)[nlongs-1] &= endmask;
887 return 0;
888}
889
890/* Copy a kernel node mask to user space */
891static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
892 nodemask_t *nodes)
893{
894 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
895 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
896
897 if (copy > nbytes) {
898 if (copy > PAGE_SIZE)
899 return -EINVAL;
900 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
901 return -EFAULT;
902 copy = nbytes;
903 }
904 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
905}
906
907asmlinkage long sys_mbind(unsigned long start, unsigned long len,
908 unsigned long mode,
909 unsigned long __user *nmask, unsigned long maxnode,
910 unsigned flags)
911{
912 nodemask_t nodes;
913 int err;
David Rientjes028fec42008-04-28 02:12:25 -0700914 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700915
David Rientjes028fec42008-04-28 02:12:25 -0700916 mode_flags = mode & MPOL_MODE_FLAGS;
917 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -0700918 if (mode >= MPOL_MAX)
919 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700920 err = get_nodes(&nodes, nmask, maxnode);
921 if (err)
922 return err;
David Rientjes028fec42008-04-28 02:12:25 -0700923 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -0700924}
925
926/* Set the process memory policy */
927asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
928 unsigned long maxnode)
929{
930 int err;
931 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -0700932 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700933
David Rientjes028fec42008-04-28 02:12:25 -0700934 flags = mode & MPOL_MODE_FLAGS;
935 mode &= ~MPOL_MODE_FLAGS;
936 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -0700937 return -EINVAL;
938 err = get_nodes(&nodes, nmask, maxnode);
939 if (err)
940 return err;
David Rientjes028fec42008-04-28 02:12:25 -0700941 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -0700942}
943
Christoph Lameter39743882006-01-08 01:00:51 -0800944asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
945 const unsigned long __user *old_nodes,
946 const unsigned long __user *new_nodes)
947{
948 struct mm_struct *mm;
949 struct task_struct *task;
950 nodemask_t old;
951 nodemask_t new;
952 nodemask_t task_nodes;
953 int err;
954
955 err = get_nodes(&old, old_nodes, maxnode);
956 if (err)
957 return err;
958
959 err = get_nodes(&new, new_nodes, maxnode);
960 if (err)
961 return err;
962
963 /* Find the mm_struct */
964 read_lock(&tasklist_lock);
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -0700965 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -0800966 if (!task) {
967 read_unlock(&tasklist_lock);
968 return -ESRCH;
969 }
970 mm = get_task_mm(task);
971 read_unlock(&tasklist_lock);
972
973 if (!mm)
974 return -EINVAL;
975
976 /*
977 * Check if this process has the right to modify the specified
978 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -0800979 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -0800980 * userid as the target process.
981 */
982 if ((current->euid != task->suid) && (current->euid != task->uid) &&
983 (current->uid != task->suid) && (current->uid != task->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -0800984 !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -0800985 err = -EPERM;
986 goto out;
987 }
988
989 task_nodes = cpuset_mems_allowed(task);
990 /* Is the user allowed to access the target nodes? */
Christoph Lameter74c00242006-03-14 19:50:21 -0800991 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -0800992 err = -EPERM;
993 goto out;
994 }
995
Lee Schermerhorn37b07e42007-10-16 01:25:39 -0700996 if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -0700997 err = -EINVAL;
998 goto out;
999 }
1000
David Quigley86c3a762006-06-23 02:04:02 -07001001 err = security_task_movememory(task);
1002 if (err)
1003 goto out;
1004
Christoph Lameter511030b2006-02-28 16:58:57 -08001005 err = do_migrate_pages(mm, &old, &new,
Christoph Lameter74c00242006-03-14 19:50:21 -08001006 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter39743882006-01-08 01:00:51 -08001007out:
1008 mmput(mm);
1009 return err;
1010}
1011
1012
Christoph Lameter8bccd852005-10-29 18:16:59 -07001013/* Retrieve NUMA policy */
1014asmlinkage long sys_get_mempolicy(int __user *policy,
1015 unsigned long __user *nmask,
1016 unsigned long maxnode,
1017 unsigned long addr, unsigned long flags)
1018{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001019 int err;
1020 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001021 nodemask_t nodes;
1022
1023 if (nmask != NULL && maxnode < MAX_NUMNODES)
1024 return -EINVAL;
1025
1026 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1027
1028 if (err)
1029 return err;
1030
1031 if (policy && put_user(pval, policy))
1032 return -EFAULT;
1033
1034 if (nmask)
1035 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1036
1037 return err;
1038}
1039
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040#ifdef CONFIG_COMPAT
1041
1042asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1043 compat_ulong_t __user *nmask,
1044 compat_ulong_t maxnode,
1045 compat_ulong_t addr, compat_ulong_t flags)
1046{
1047 long err;
1048 unsigned long __user *nm = NULL;
1049 unsigned long nr_bits, alloc_size;
1050 DECLARE_BITMAP(bm, MAX_NUMNODES);
1051
1052 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1053 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1054
1055 if (nmask)
1056 nm = compat_alloc_user_space(alloc_size);
1057
1058 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1059
1060 if (!err && nmask) {
1061 err = copy_from_user(bm, nm, alloc_size);
1062 /* ensure entire bitmap is zeroed */
1063 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1064 err |= compat_put_bitmap(nmask, bm, nr_bits);
1065 }
1066
1067 return err;
1068}
1069
1070asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1071 compat_ulong_t maxnode)
1072{
1073 long err = 0;
1074 unsigned long __user *nm = NULL;
1075 unsigned long nr_bits, alloc_size;
1076 DECLARE_BITMAP(bm, MAX_NUMNODES);
1077
1078 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1079 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1080
1081 if (nmask) {
1082 err = compat_get_bitmap(bm, nmask, nr_bits);
1083 nm = compat_alloc_user_space(alloc_size);
1084 err |= copy_to_user(nm, bm, alloc_size);
1085 }
1086
1087 if (err)
1088 return -EFAULT;
1089
1090 return sys_set_mempolicy(mode, nm, nr_bits+1);
1091}
1092
1093asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1094 compat_ulong_t mode, compat_ulong_t __user *nmask,
1095 compat_ulong_t maxnode, compat_ulong_t flags)
1096{
1097 long err = 0;
1098 unsigned long __user *nm = NULL;
1099 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001100 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
1102 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1103 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1104
1105 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001106 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001107 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001108 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109 }
1110
1111 if (err)
1112 return -EFAULT;
1113
1114 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1115}
1116
1117#endif
1118
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001119/*
1120 * get_vma_policy(@task, @vma, @addr)
1121 * @task - task for fallback if vma policy == default
1122 * @vma - virtual memory area whose policy is sought
1123 * @addr - address in @vma for shared policy lookup
1124 *
1125 * Returns effective policy for a VMA at specified address.
1126 * Falls back to @task or system default policy, as necessary.
1127 * Returned policy has extra reference count if shared, vma,
1128 * or some other task's policy [show_numa_maps() can pass
1129 * @task != current]. It is the caller's responsibility to
1130 * free the reference in these cases.
1131 */
Christoph Lameter48fce342006-01-08 01:01:03 -08001132static struct mempolicy * get_vma_policy(struct task_struct *task,
1133 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001135 struct mempolicy *pol = task->mempolicy;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001136 int shared_pol = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137
1138 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001139 if (vma->vm_ops && vma->vm_ops->get_policy) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001140 pol = vma->vm_ops->get_policy(vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001141 shared_pol = 1; /* if pol non-NULL, add ref below */
1142 } else if (vma->vm_policy &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 vma->vm_policy->policy != MPOL_DEFAULT)
1144 pol = vma->vm_policy;
1145 }
1146 if (!pol)
1147 pol = &default_policy;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001148 else if (!shared_pol && pol != current->mempolicy)
1149 mpol_get(pol); /* vma or other task's policy */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 return pol;
1151}
1152
Mel Gorman19770b32008-04-28 02:12:18 -07001153/* Return a nodemask representing a mempolicy */
1154static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
1155{
1156 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1157 if (unlikely(policy->policy == MPOL_BIND) &&
1158 gfp_zone(gfp) >= policy_zone &&
1159 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1160 return &policy->v.nodes;
1161
1162 return NULL;
1163}
1164
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165/* Return a zonelist representing a mempolicy */
Al Virodd0fc662005-10-07 07:46:04 +01001166static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167{
1168 int nd;
1169
1170 switch (policy->policy) {
1171 case MPOL_PREFERRED:
1172 nd = policy->v.preferred_node;
1173 if (nd < 0)
1174 nd = numa_node_id();
1175 break;
1176 case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
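		/*
		 * e.g. a __GFP_THISNODE allocation issued on node 3 with a
		 * bind mask of {0,1} uses node 0's zonelist here.
		 */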
1183 nd = numa_node_id();
1184 if (unlikely(gfp & __GFP_THISNODE) &&
1185 unlikely(!node_isset(nd, policy->v.nodes)))
1186 nd = first_node(policy->v.nodes);
1187 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 case MPOL_INTERLEAVE: /* should not happen */
1189 case MPOL_DEFAULT:
1190 nd = numa_node_id();
1191 break;
1192 default:
1193 nd = 0;
1194 BUG();
1195 }
Mel Gorman0e884602008-04-28 02:12:14 -07001196 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197}
1198
1199/* Do dynamic interleaving for a process */
1200static unsigned interleave_nodes(struct mempolicy *policy)
1201{
1202 unsigned nid, next;
1203 struct task_struct *me = current;
1204
1205 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001206 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001208 next = first_node(policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209 me->il_next = next;
1210 return nid;
1211}
1212
Christoph Lameterdc85da12006-01-18 17:42:36 -08001213/*
1214 * Depending on the memory policy provide a node from which to allocate the
1215 * next slab entry.
1216 */
1217unsigned slab_node(struct mempolicy *policy)
1218{
David Rientjesa3b51e02008-04-28 02:12:23 -07001219 unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;
Christoph Lameter765c4502006-09-27 01:50:08 -07001220
1221 switch (pol) {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001222 case MPOL_INTERLEAVE:
1223 return interleave_nodes(policy);
1224
Mel Gormandd1a2392008-04-28 02:12:17 -07001225 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001226 /*
1227 * Follow bind policy behavior and start allocation at the
1228 * first node.
1229 */
Mel Gorman19770b32008-04-28 02:12:18 -07001230 struct zonelist *zonelist;
1231 struct zone *zone;
1232 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1233 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1234 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1235 &policy->v.nodes,
1236 &zone);
1237 return zone->node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001238 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001239
1240 case MPOL_PREFERRED:
1241 if (policy->v.preferred_node >= 0)
1242 return policy->v.preferred_node;
1243 /* Fall through */
1244
1245 default:
1246 return numa_node_id();
1247 }
1248}
1249
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250/* Do static interleaving for a VMA with known offset. */
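/*
 * e.g. with pol->v.nodes = {0,2,5} and off = 7: nnodes = 3, target is
 * 7 % 3 = 1, and the walk below stops at the second set node, node 2.
 */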
1251static unsigned offset_il_node(struct mempolicy *pol,
1252 struct vm_area_struct *vma, unsigned long off)
1253{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001254 unsigned nnodes = nodes_weight(pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 unsigned target = (unsigned)off % nnodes;
1256 int c;
1257 int nid = -1;
1258
1259 c = 0;
1260 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001261 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 c++;
1263 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 return nid;
1265}
1266
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001267/* Determine a node number for interleave */
1268static inline unsigned interleave_nid(struct mempolicy *pol,
1269 struct vm_area_struct *vma, unsigned long addr, int shift)
1270{
1271 if (vma) {
1272 unsigned long off;
1273
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001274 /*
1275 * for small pages, there is no difference between
1276 * shift and PAGE_SHIFT, so the bit-shift is safe.
1277 * for huge pages, since vm_pgoff is in units of small
1278 * pages, we need to shift off the always 0 bits to get
1279 * a useful offset.
1280 */
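		/*
		 * e.g. with 4 KB base pages (PAGE_SHIFT == 12) and 2 MB huge
		 * pages (shift == 21), vm_pgoff is in 4 KB units, so its low
		 * 21 - 12 = 9 bits are always zero and are shifted off to
		 * yield an offset in huge-page-sized units.
		 */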
1281 BUG_ON(shift < PAGE_SHIFT);
1282 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001283 off += (addr - vma->vm_start) >> shift;
1284 return offset_il_node(pol, vma, off);
1285 } else
1286 return interleave_nodes(pol);
1287}
1288
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001289#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation.
 * If the effective policy is 'BIND', returns a pointer to the local node's
 * zonelist, and a pointer to the mempolicy's @nodemask for filtering the
 * zonelist.
 * If it is also a policy for which get_vma_policy() returns an extra
 * reference, we must hold that reference until after the allocation.
 * In that case, return the policy via @mpol so hugetlb allocation can drop
 * the reference.  For non-'BIND' referenced policies, we can/do drop the
 * reference here, so the caller doesn't need to know about the special case
 * for default and current task policy.
 */
Mel Gorman396faf02007-07-17 04:03:13 -07001308struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001309 gfp_t gfp_flags, struct mempolicy **mpol,
1310 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001311{
1312 struct mempolicy *pol = get_vma_policy(current, vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001313 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001314
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001315 *mpol = NULL; /* probably no unref needed */
Mel Gorman19770b32008-04-28 02:12:18 -07001316 *nodemask = NULL; /* assume !MPOL_BIND */
1317 if (pol->policy == MPOL_BIND) {
1318 *nodemask = &pol->v.nodes;
1319 } else if (pol->policy == MPOL_INTERLEAVE) {
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001320 unsigned nid;
1321
1322 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
Lee Schermerhorn69682d82008-03-10 11:43:45 -07001323 if (unlikely(pol != &default_policy &&
1324 pol != current->mempolicy))
1325 __mpol_free(pol); /* finished with pol */
Mel Gorman0e884602008-04-28 02:12:14 -07001326 return node_zonelist(nid, gfp_flags);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001327 }
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001328
1329 zl = zonelist_policy(GFP_HIGHUSER, pol);
1330 if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1331 if (pol->policy != MPOL_BIND)
1332 __mpol_free(pol); /* finished with pol */
1333 else
1334 *mpol = pol; /* unref needed after allocation */
1335 }
1336 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001337}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001338#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001339
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340/* Allocate a page in interleaved policy.
1341 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001342static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1343 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
1345 struct zonelist *zl;
1346 struct page *page;
1347
Mel Gorman0e884602008-04-28 02:12:14 -07001348 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001350 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001351 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 return page;
1353}
1354
1355/**
1356 * alloc_page_vma - Allocate a page for a VMA.
1357 *
1358 * @gfp:
 1359 * %GFP_USER user allocation,
1360 * %GFP_KERNEL kernel allocations,
1361 * %GFP_HIGHMEM highmem/user allocations,
1362 * %GFP_FS allocation should not call back into a file system.
1363 * %GFP_ATOMIC don't sleep.
1364 *
1365 * @vma: Pointer to VMA or NULL if not available.
1366 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1367 *
1368 * This function allocates a page from the kernel page pool and applies
1369 * a NUMA policy associated with the VMA or the current process.
 1370 * When @vma is not NULL, the caller must hold down_read on the mmap_sem of the
1371 * mm_struct of the VMA to prevent it from going away. Should be used for
1372 * all allocations for pages that will be mapped into
1373 * user space. Returns NULL when no page can be allocated.
1374 *
 1375 * Should be called with the mmap_sem of the vma held.
1376 */
1377struct page *
Al Virodd0fc662005-10-07 07:46:04 +01001378alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001380 struct mempolicy *pol = get_vma_policy(current, vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001381 struct zonelist *zl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001383 cpuset_update_task_memory_state();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384
1385 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1386 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001387
1388 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
Lee Schermerhorn69682d82008-03-10 11:43:45 -07001389 if (unlikely(pol != &default_policy &&
1390 pol != current->mempolicy))
1391 __mpol_free(pol); /* finished with pol */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 return alloc_page_interleave(gfp, 0, nid);
1393 }
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001394 zl = zonelist_policy(gfp, pol);
1395 if (pol != &default_policy && pol != current->mempolicy) {
1396 /*
1397 * slow path: ref counted policy -- shared or vma
1398 */
Mel Gorman19770b32008-04-28 02:12:18 -07001399 struct page *page = __alloc_pages_nodemask(gfp, 0,
1400 zl, nodemask_policy(gfp, pol));
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001401 __mpol_free(pol);
1402 return page;
1403 }
1404 /*
1405 * fast path: default or task policy
1406 */
Mel Gorman19770b32008-04-28 02:12:18 -07001407 return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408}
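/*
 * Illustrative sketch (assumed caller, not taken from this file): a fault
 * handler for an anonymous VMA would allocate roughly like this, while
 * holding down_read(&mm->mmap_sem):
 *
 *	page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */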
1409
1410/**
1411 * alloc_pages_current - Allocate pages.
1412 *
1413 * @gfp:
1414 * %GFP_USER user allocation,
1415 * %GFP_KERNEL kernel allocation,
1416 * %GFP_HIGHMEM highmem allocation,
1417 * %GFP_FS don't call back into a file system.
1418 * %GFP_ATOMIC don't sleep.
1419 * @order: Power of two of allocation size in pages. 0 is a single page.
1420 *
 1421 * Allocate a page from the kernel page pool. When not in interrupt
 1422 * context, the current process' NUMA policy is applied.
1423 * Returns NULL when no page can be allocated.
1424 *
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001425 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 * 1) it's ok to take cpuset_sem (can WAIT), and
1427 * 2) allocating for current task (not interrupt).
1428 */
Al Virodd0fc662005-10-07 07:46:04 +01001429struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430{
1431 struct mempolicy *pol = current->mempolicy;
1432
1433 if ((gfp & __GFP_WAIT) && !in_interrupt())
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001434 cpuset_update_task_memory_state();
Christoph Lameter9b819d22006-09-25 23:31:40 -07001435 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 pol = &default_policy;
1437 if (pol->policy == MPOL_INTERLEAVE)
1438 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
Mel Gorman19770b32008-04-28 02:12:18 -07001439 return __alloc_pages_nodemask(gfp, order,
1440 zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441}
1442EXPORT_SYMBOL(alloc_pages_current);
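/*
 * Note: on CONFIG_NUMA kernels the generic alloc_pages(gfp, order) helper
 * expands to alloc_pages_current(), so a plain allocation such as the
 * illustrative call below already honours the caller's mempolicy:
 *
 *	page = alloc_pages(GFP_KERNEL, 2);	(order 2 == 4 contiguous pages)
 */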
1443
Paul Jackson42253992006-01-08 01:01:59 -08001444/*
1445 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 1446 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1447 * with the mems_allowed returned by cpuset_mems_allowed(). This
 1448 * keeps mempolicies cpuset-relative after its cpuset moves. See
 1449 * also update_nodemask() in kernel/cpuset.c.
1450 */
Paul Jackson42253992006-01-08 01:01:59 -08001451
Linus Torvalds1da177e2005-04-16 15:20:36 -07001452/* Slow path of a mempolicy copy */
1453struct mempolicy *__mpol_copy(struct mempolicy *old)
1454{
1455 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1456
1457 if (!new)
1458 return ERR_PTR(-ENOMEM);
Paul Jackson42253992006-01-08 01:01:59 -08001459 if (current_cpuset_is_being_rebound()) {
1460 nodemask_t mems = cpuset_mems_allowed(current);
1461 mpol_rebind_policy(old, &mems);
1462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 *new = *old;
1464 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 return new;
1466}
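/*
 * Illustrative sketch: __mpol_copy() is normally reached via the
 * mpol_copy() wrapper, e.g. when a task's mempolicy is duplicated at fork
 * time (the error handling below is assumed/elided):
 *
 *	p->mempolicy = mpol_copy(p->mempolicy);
 *	if (IS_ERR(p->mempolicy))
 *		goto out_free;
 */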
1467
1468/* Slow path of a mempolicy comparison */
1469int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1470{
1471 if (!a || !b)
1472 return 0;
1473 if (a->policy != b->policy)
1474 return 0;
1475 switch (a->policy) {
1476 case MPOL_DEFAULT:
1477 return 1;
Mel Gorman19770b32008-04-28 02:12:18 -07001478 case MPOL_BIND:
1479 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -07001481 return nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 case MPOL_PREFERRED:
1483 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 default:
1485 BUG();
1486 return 0;
1487 }
1488}
1489
1490/* Slow path of a mpol destructor. */
1491void __mpol_free(struct mempolicy *p)
1492{
1493 if (!atomic_dec_and_test(&p->refcnt))
1494 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 p->policy = MPOL_DEFAULT;
1496 kmem_cache_free(policy_cache, p);
1497}
1498
1499/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500 * Shared memory backing store policy support.
1501 *
1502 * Remember policies even when nobody has shared memory mapped.
 1503 * The policies are kept in a red-black tree linked from the inode.
1504 * They are protected by the sp->lock spinlock, which should be held
1505 * for any accesses to the tree.
1506 */
1507
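/*
 * Typical (illustrative) life cycle of a shared policy as driven by a
 * filesystem such as tmpfs -- the exact call sites are assumptions here:
 *
 *	mpol_shared_policy_init(&info->policy, ...);		inode creation
 *	mpol_set_shared_policy(&info->policy, vma, new);	mbind() on a mapping
 *	pol = mpol_shared_policy_lookup(&info->policy, idx);	page allocation
 *	mpol_free_shared_policy(&info->policy);			inode deletion
 */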
1508/* lookup first element intersecting start-end */
1509/* Caller holds sp->lock */
1510static struct sp_node *
1511sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1512{
1513 struct rb_node *n = sp->root.rb_node;
1514
1515 while (n) {
1516 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1517
1518 if (start >= p->end)
1519 n = n->rb_right;
1520 else if (end <= p->start)
1521 n = n->rb_left;
1522 else
1523 break;
1524 }
1525 if (!n)
1526 return NULL;
1527 for (;;) {
1528 struct sp_node *w = NULL;
1529 struct rb_node *prev = rb_prev(n);
1530 if (!prev)
1531 break;
1532 w = rb_entry(prev, struct sp_node, nd);
1533 if (w->end <= start)
1534 break;
1535 n = prev;
1536 }
1537 return rb_entry(n, struct sp_node, nd);
1538}
1539
1540/* Insert a new shared policy into the list. */
1541/* Caller holds sp->lock */
1542static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1543{
1544 struct rb_node **p = &sp->root.rb_node;
1545 struct rb_node *parent = NULL;
1546 struct sp_node *nd;
1547
1548 while (*p) {
1549 parent = *p;
1550 nd = rb_entry(parent, struct sp_node, nd);
1551 if (new->start < nd->start)
1552 p = &(*p)->rb_left;
1553 else if (new->end > nd->end)
1554 p = &(*p)->rb_right;
1555 else
1556 BUG();
1557 }
1558 rb_link_node(&new->nd, parent, p);
1559 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07001560 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 new->policy ? new->policy->policy : 0);
1562}
1563
1564/* Find shared policy intersecting idx */
1565struct mempolicy *
1566mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1567{
1568 struct mempolicy *pol = NULL;
1569 struct sp_node *sn;
1570
1571 if (!sp->root.rb_node)
1572 return NULL;
1573 spin_lock(&sp->lock);
1574 sn = sp_lookup(sp, idx, idx+1);
1575 if (sn) {
1576 mpol_get(sn->policy);
1577 pol = sn->policy;
1578 }
1579 spin_unlock(&sp->lock);
1580 return pol;
1581}
1582
1583static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1584{
Paul Mundt140d5a42007-07-15 23:38:16 -07001585	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 rb_erase(&n->nd, &sp->root);
1587 mpol_free(n->policy);
1588 kmem_cache_free(sn_cache, n);
1589}
1590
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001591static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1592 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593{
1594 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1595
1596 if (!n)
1597 return NULL;
1598 n->start = start;
1599 n->end = end;
1600 mpol_get(pol);
1601 n->policy = pol;
1602 return n;
1603}
1604
1605/* Replace a policy range. */
1606static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1607 unsigned long end, struct sp_node *new)
1608{
1609 struct sp_node *n, *new2 = NULL;
1610
1611restart:
1612 spin_lock(&sp->lock);
1613 n = sp_lookup(sp, start, end);
1614 /* Take care of old policies in the same range. */
1615 while (n && n->start < end) {
1616 struct rb_node *next = rb_next(&n->nd);
1617 if (n->start >= start) {
1618 if (n->end <= end)
1619 sp_delete(sp, n);
1620 else
1621 n->start = end;
1622 } else {
1623 /* Old policy spanning whole new range. */
1624 if (n->end > end) {
1625 if (!new2) {
1626 spin_unlock(&sp->lock);
1627 new2 = sp_alloc(end, n->end, n->policy);
1628 if (!new2)
1629 return -ENOMEM;
1630 goto restart;
1631 }
1632 n->end = start;
1633 sp_insert(sp, new2);
1634 new2 = NULL;
1635 break;
1636 } else
1637 n->end = start;
1638 }
1639 if (!next)
1640 break;
1641 n = rb_entry(next, struct sp_node, nd);
1642 }
1643 if (new)
1644 sp_insert(sp, new);
1645 spin_unlock(&sp->lock);
1646 if (new2) {
1647 mpol_free(new2->policy);
1648 kmem_cache_free(sn_cache, new2);
1649 }
1650 return 0;
1651}
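/*
 * Worked example (illustrative): with one existing node covering offsets
 * [0, 10) and a replacement @new covering [2, 5), the loop above trims the
 * old node to [0, 2), inserts the @new2 copy for [5, 10), and finally
 * inserts @new for [2, 5), so the whole range remains covered.
 */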
1652
David Rientjesa3b51e02008-04-28 02:12:23 -07001653void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
David Rientjes028fec42008-04-28 02:12:25 -07001654 unsigned short flags, nodemask_t *policy_nodes)
Robin Holt7339ff82006-01-14 13:20:48 -08001655{
1656 info->root = RB_ROOT;
1657 spin_lock_init(&info->lock);
1658
1659 if (policy != MPOL_DEFAULT) {
1660 struct mempolicy *newpol;
1661
1662 /* Falls back to MPOL_DEFAULT on any error */
David Rientjes028fec42008-04-28 02:12:25 -07001663 newpol = mpol_new(policy, flags, policy_nodes);
Robin Holt7339ff82006-01-14 13:20:48 -08001664 if (!IS_ERR(newpol)) {
1665 /* Create pseudo-vma that contains just the policy */
1666 struct vm_area_struct pvma;
1667
1668 memset(&pvma, 0, sizeof(struct vm_area_struct));
1669 /* Policy covers entire file */
1670 pvma.vm_end = TASK_SIZE;
1671 mpol_set_shared_policy(info, &pvma, newpol);
1672 mpol_free(newpol);
1673 }
1674 }
1675}
1676
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677int mpol_set_shared_policy(struct shared_policy *info,
1678 struct vm_area_struct *vma, struct mempolicy *npol)
1679{
1680 int err;
1681 struct sp_node *new = NULL;
1682 unsigned long sz = vma_pages(vma);
1683
David Rientjes028fec42008-04-28 02:12:25 -07001684 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685 vma->vm_pgoff,
David Rientjes028fec42008-04-28 02:12:25 -07001686 sz, npol ? npol->policy : -1,
1687 npol ? npol->flags : -1,
Paul Mundt140d5a42007-07-15 23:38:16 -07001688 npol ? nodes_addr(npol->v.nodes)[0] : -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
1690 if (npol) {
1691 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1692 if (!new)
1693 return -ENOMEM;
1694 }
1695 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1696 if (err && new)
1697 kmem_cache_free(sn_cache, new);
1698 return err;
1699}
1700
1701/* Free a backing policy store on inode delete. */
1702void mpol_free_shared_policy(struct shared_policy *p)
1703{
1704 struct sp_node *n;
1705 struct rb_node *next;
1706
1707 if (!p->root.rb_node)
1708 return;
1709 spin_lock(&p->lock);
1710 next = rb_first(&p->root);
1711 while (next) {
1712 n = rb_entry(next, struct sp_node, nd);
1713 next = rb_next(&n->nd);
Andi Kleen90c50292005-07-27 11:43:50 -07001714 rb_erase(&n->nd, &p->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 mpol_free(n->policy);
1716 kmem_cache_free(sn_cache, n);
1717 }
1718 spin_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719}
1720
1721/* assumes fs == KERNEL_DS */
1722void __init numa_policy_init(void)
1723{
Paul Mundtb71636e2007-07-15 23:38:15 -07001724 nodemask_t interleave_nodes;
1725 unsigned long largest = 0;
1726 int nid, prefer = 0;
1727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 policy_cache = kmem_cache_create("numa_policy",
1729 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09001730 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 sn_cache = kmem_cache_create("shared_policy_node",
1733 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09001734 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735
Paul Mundtb71636e2007-07-15 23:38:15 -07001736 /*
1737 * Set interleaving policy for system init. Interleaving is only
1738 * enabled across suitably sized nodes (default is >= 16MB), or
1739 * fall back to the largest node if they're all smaller.
1740 */
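	/*
	 * Example of the threshold below (illustrative): with 4 KiB pages a
	 * node needs at least 4096 present pages, since
	 * 4096 << 12 == 16 MiB == 16 << 20, assuming PAGE_SHIFT == 12.
	 */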
1741 nodes_clear(interleave_nodes);
Christoph Lameter56bbd652007-10-16 01:25:35 -07001742 for_each_node_state(nid, N_HIGH_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07001743 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Paul Mundtb71636e2007-07-15 23:38:15 -07001745 /* Preserve the largest node */
1746 if (largest < total_pages) {
1747 largest = total_pages;
1748 prefer = nid;
1749 }
1750
1751 /* Interleave this node? */
1752 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1753 node_set(nid, interleave_nodes);
1754 }
1755
1756 /* All too small, use the largest */
1757 if (unlikely(nodes_empty(interleave_nodes)))
1758 node_set(prefer, interleave_nodes);
1759
David Rientjes028fec42008-04-28 02:12:25 -07001760 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
1762}
1763
Christoph Lameter8bccd852005-10-29 18:16:59 -07001764/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765void numa_default_policy(void)
1766{
David Rientjes028fec42008-04-28 02:12:25 -07001767 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768}
Paul Jackson68860ec2005-10-30 15:02:36 -08001769
1770/* Migrate a policy to a different set of nodes */
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001771static void mpol_rebind_policy(struct mempolicy *pol,
1772 const nodemask_t *newmask)
Paul Jackson68860ec2005-10-30 15:02:36 -08001773{
Paul Jackson74cb2152006-01-08 01:01:56 -08001774 nodemask_t *mpolmask;
Paul Jackson68860ec2005-10-30 15:02:36 -08001775 nodemask_t tmp;
1776
1777 if (!pol)
1778 return;
Paul Jackson74cb2152006-01-08 01:01:56 -08001779 mpolmask = &pol->cpuset_mems_allowed;
1780 if (nodes_equal(*mpolmask, *newmask))
1781 return;
Paul Jackson68860ec2005-10-30 15:02:36 -08001782
1783 switch (pol->policy) {
1784 case MPOL_DEFAULT:
1785 break;
Mel Gorman19770b32008-04-28 02:12:18 -07001786 case MPOL_BIND:
1787 /* Fall through */
Paul Jackson68860ec2005-10-30 15:02:36 -08001788 case MPOL_INTERLEAVE:
Paul Jackson74cb2152006-01-08 01:01:56 -08001789 nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
Paul Jackson68860ec2005-10-30 15:02:36 -08001790 pol->v.nodes = tmp;
Paul Jackson74cb2152006-01-08 01:01:56 -08001791 *mpolmask = *newmask;
1792 current->il_next = node_remap(current->il_next,
1793 *mpolmask, *newmask);
Paul Jackson68860ec2005-10-30 15:02:36 -08001794 break;
1795 case MPOL_PREFERRED:
1796 pol->v.preferred_node = node_remap(pol->v.preferred_node,
Paul Jackson74cb2152006-01-08 01:01:56 -08001797 *mpolmask, *newmask);
1798 *mpolmask = *newmask;
Paul Jackson68860ec2005-10-30 15:02:36 -08001799 break;
Paul Jackson68860ec2005-10-30 15:02:36 -08001800 default:
1801 BUG();
1802 break;
1803 }
1804}
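/*
 * Worked example (illustrative): a policy interleaving over nodes 1-2
 * whose cpuset is moved from mems 0-3 to mems 4-7 is remapped by
 * nodes_remap() above to nodes 5-6, i.e. the same relative positions
 * within the new mems_allowed.
 */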
1805
1806/*
Paul Jackson74cb2152006-01-08 01:01:56 -08001807 * Wrapper for mpol_rebind_policy() that just requires a task
 1808 * pointer and updates the task's mempolicy.
Paul Jackson68860ec2005-10-30 15:02:36 -08001809 */
Paul Jackson74cb2152006-01-08 01:01:56 -08001810
1811void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
Paul Jackson68860ec2005-10-30 15:02:36 -08001812{
Paul Jackson74cb2152006-01-08 01:01:56 -08001813 mpol_rebind_policy(tsk->mempolicy, new);
Paul Jackson68860ec2005-10-30 15:02:36 -08001814}
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001815
1816/*
Paul Jackson42253992006-01-08 01:01:59 -08001817 * Rebind each vma in mm to new nodemask.
1818 *
1819 * Call holding a reference to mm. Takes mm->mmap_sem during call.
1820 */
1821
1822void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
1823{
1824 struct vm_area_struct *vma;
1825
1826 down_write(&mm->mmap_sem);
1827 for (vma = mm->mmap; vma; vma = vma->vm_next)
1828 mpol_rebind_policy(vma->vm_policy, new);
1829 up_write(&mm->mmap_sem);
1830}
1831
1832/*
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001833 * Display pages allocated per node and memory policy via /proc.
1834 */
1835
Helge Deller15ad7cd2006-12-06 20:40:36 -08001836static const char * const policy_types[] =
1837 { "default", "prefer", "bind", "interleave" };
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001838
1839/*
1840 * Convert a mempolicy into a string.
1841 * Returns the number of characters in buffer (if positive)
1842 * or an error (negative)
1843 */
1844static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1845{
1846 char *p = buffer;
1847 int l;
1848 nodemask_t nodes;
David Rientjesa3b51e02008-04-28 02:12:23 -07001849 unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001850
1851 switch (mode) {
1852 case MPOL_DEFAULT:
1853 nodes_clear(nodes);
1854 break;
1855
1856 case MPOL_PREFERRED:
1857 nodes_clear(nodes);
1858 node_set(pol->v.preferred_node, nodes);
1859 break;
1860
1861 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001862 /* Fall through */
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001863 case MPOL_INTERLEAVE:
1864 nodes = pol->v.nodes;
1865 break;
1866
1867 default:
1868 BUG();
1869 return -EFAULT;
1870 }
1871
1872 l = strlen(policy_types[mode]);
1873 if (buffer + maxlen < p + l + 1)
1874 return -ENOSPC;
1875
1876 strcpy(p, policy_types[mode]);
1877 p += l;
1878
1879 if (!nodes_empty(nodes)) {
1880 if (buffer + maxlen < p + 2)
1881 return -ENOSPC;
1882 *p++ = '=';
1883 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1884 }
1885 return p - buffer;
1886}
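/*
 * Example (illustrative) strings produced above, as later shown in
 * /proc/<pid>/numa_maps: "default", "prefer=1", "bind=0,2",
 * "interleave=0-3".
 */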
1887
1888struct numa_maps {
1889 unsigned long pages;
1890 unsigned long anon;
Christoph Lameter397874d2006-03-06 15:42:53 -08001891 unsigned long active;
1892 unsigned long writeback;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001893 unsigned long mapcount_max;
Christoph Lameter397874d2006-03-06 15:42:53 -08001894 unsigned long dirty;
1895 unsigned long swapcache;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001896 unsigned long node[MAX_NUMNODES];
1897};
1898
Christoph Lameter397874d2006-03-06 15:42:53 -08001899static void gather_stats(struct page *page, void *private, int pte_dirty)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001900{
1901 struct numa_maps *md = private;
1902 int count = page_mapcount(page);
1903
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001904 md->pages++;
Christoph Lameter397874d2006-03-06 15:42:53 -08001905 if (pte_dirty || PageDirty(page))
1906 md->dirty++;
1907
1908 if (PageSwapCache(page))
1909 md->swapcache++;
1910
1911 if (PageActive(page))
1912 md->active++;
1913
1914 if (PageWriteback(page))
1915 md->writeback++;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001916
1917 if (PageAnon(page))
1918 md->anon++;
1919
Christoph Lameter397874d2006-03-06 15:42:53 -08001920 if (count > md->mapcount_max)
1921 md->mapcount_max = count;
1922
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001923 md->node[page_to_nid(page)]++;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001924}
1925
Andrew Morton7f709ed2006-03-07 21:55:22 -08001926#ifdef CONFIG_HUGETLB_PAGE
Christoph Lameter397874d2006-03-06 15:42:53 -08001927static void check_huge_range(struct vm_area_struct *vma,
1928 unsigned long start, unsigned long end,
1929 struct numa_maps *md)
1930{
1931 unsigned long addr;
1932 struct page *page;
1933
1934 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1935 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1936 pte_t pte;
1937
1938 if (!ptep)
1939 continue;
1940
1941 pte = *ptep;
1942 if (pte_none(pte))
1943 continue;
1944
1945 page = pte_page(pte);
1946 if (!page)
1947 continue;
1948
1949 gather_stats(page, md, pte_dirty(*ptep));
1950 }
1951}
Andrew Morton7f709ed2006-03-07 21:55:22 -08001952#else
1953static inline void check_huge_range(struct vm_area_struct *vma,
1954 unsigned long start, unsigned long end,
1955 struct numa_maps *md)
1956{
1957}
1958#endif
Christoph Lameter397874d2006-03-06 15:42:53 -08001959
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001960int show_numa_map(struct seq_file *m, void *v)
1961{
Eric W. Biederman99f89552006-06-26 00:25:55 -07001962 struct proc_maps_private *priv = m->private;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001963 struct vm_area_struct *vma = v;
1964 struct numa_maps *md;
Christoph Lameter397874d2006-03-06 15:42:53 -08001965 struct file *file = vma->vm_file;
1966 struct mm_struct *mm = vma->vm_mm;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001967 struct mempolicy *pol;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001968 int n;
1969 char buffer[50];
1970
Christoph Lameter397874d2006-03-06 15:42:53 -08001971 if (!mm)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001972 return 0;
1973
1974 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
1975 if (!md)
1976 return 0;
1977
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001978 pol = get_vma_policy(priv->task, vma, vma->vm_start);
1979 mpol_to_str(buffer, sizeof(buffer), pol);
1980 /*
1981 * unref shared or other task's mempolicy
1982 */
1983 if (pol != &default_policy && pol != current->mempolicy)
1984 __mpol_free(pol);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001985
Christoph Lameter397874d2006-03-06 15:42:53 -08001986 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001987
Christoph Lameter397874d2006-03-06 15:42:53 -08001988 if (file) {
1989 seq_printf(m, " file=");
Jan Blunckc32c2f62008-02-14 19:38:43 -08001990 seq_path(m, &file->f_path, "\n\t= ");
Christoph Lameter397874d2006-03-06 15:42:53 -08001991 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1992 seq_printf(m, " heap");
1993 } else if (vma->vm_start <= mm->start_stack &&
1994 vma->vm_end >= mm->start_stack) {
1995 seq_printf(m, " stack");
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001996 }
Christoph Lameter397874d2006-03-06 15:42:53 -08001997
1998 if (is_vm_hugetlb_page(vma)) {
1999 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2000 seq_printf(m, " huge");
2001 } else {
2002 check_pgd_range(vma, vma->vm_start, vma->vm_end,
Christoph Lameter56bbd652007-10-16 01:25:35 -07002003 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
Christoph Lameter397874d2006-03-06 15:42:53 -08002004 }
2005
2006 if (!md->pages)
2007 goto out;
2008
2009 if (md->anon)
2010		seq_printf(m, " anon=%lu", md->anon);
2011
2012 if (md->dirty)
2013		seq_printf(m, " dirty=%lu", md->dirty);
2014
2015 if (md->pages != md->anon && md->pages != md->dirty)
2016 seq_printf(m, " mapped=%lu", md->pages);
2017
2018 if (md->mapcount_max > 1)
2019 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2020
2021 if (md->swapcache)
2022		seq_printf(m, " swapcache=%lu", md->swapcache);
2023
2024 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2025		seq_printf(m, " active=%lu", md->active);
2026
2027 if (md->writeback)
2028		seq_printf(m, " writeback=%lu", md->writeback);
2029
Christoph Lameter56bbd652007-10-16 01:25:35 -07002030 for_each_node_state(n, N_HIGH_MEMORY)
Christoph Lameter397874d2006-03-06 15:42:53 -08002031 if (md->node[n])
2032 seq_printf(m, " N%d=%lu", n, md->node[n]);
2033out:
2034 seq_putc(m, '\n');
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002035 kfree(md);
2036
2037 if (m->count < m->size)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002038 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002039 return 0;
2040}
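/*
 * Illustrative /proc/<pid>/numa_maps line assembled above (the path and
 * counts are made up):
 *
 *	00400000 default file=/bin/cat mapped=3 mapmax=2 N0=2 N1=1
 */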