/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
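
/*
 * Illustrative sketch, not part of the original source: from user space the
 * policies above are set through set_mempolicy(2) and mbind(2) (wrappers for
 * both are declared in libnuma's <numaif.h>, which is assumed here).  The
 * example interleaves a task's new anonymous memory over nodes 0 and 1 and
 * then binds one mapping to node 0.  maxnode is passed as one more than the
 * number of bits in the mask because get_nodes() below consumes maxnode - 1
 * bits.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		unsigned long interleave = (1UL << 0) | (1UL << 1);
 *		unsigned long bind = 1UL << 0;
 *		unsigned long maxnode = 8 * sizeof(unsigned long) + 1;
 *		void *buf;
 *
 *		set_mempolicy(MPOL_INTERLEAVE, &interleave, maxnode);
 *		buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		mbind(buf, 1 << 20, MPOL_BIND, &bind, maxnode, MPOL_MF_STRICT);
 *		return 0;
 *	}
 */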

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(nodemask_t *nodemask)
{
	int nd, k;

	/* Check that there is something useful in this mask */
	k = policy_zone;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
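
/*
 * Illustrative worked example (not part of the original source; it relies
 * on the documented bitmap_fold()/bitmap_onto() semantics): with
 * *orig = {0,2} and *rel = {4,5,6}, nodes_weight(*rel) is 3, so folding
 * leaves tmp = {0,2}, and nodes_onto() then maps bit n of tmp onto the
 * n-th set node of *rel, giving *ret = {4,6}.
 */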

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;
	nodemask_t cpuset_context_nmask;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT)
		return (nodes && nodes_weight(*nodes)) ? ERR_PTR(-EINVAL) :
							 NULL;
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	cpuset_update_task_memory_state();
	if (flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&cpuset_context_nmask, nodes,
				       &cpuset_current_mems_allowed);
	else
		nodes_and(cpuset_context_nmask, *nodes,
			  cpuset_current_mems_allowed);
	switch (mode) {
	case MPOL_INTERLEAVE:
		if (nodes_empty(*nodes) || nodes_empty(cpuset_context_nmask))
			goto free;
		policy->v.nodes = cpuset_context_nmask;
		break;
	case MPOL_PREFERRED:
		policy->v.preferred_node = first_node(cpuset_context_nmask);
		if (policy->v.preferred_node >= MAX_NUMNODES)
			goto free;
		break;
	case MPOL_BIND:
		if (!is_valid_nodemask(&cpuset_context_nmask))
			goto free;
		policy->v.nodes = cpuset_context_nmask;
		break;
	default:
		BUG();
	}
	policy->policy = mode;
	policy->flags = flags;
	if (mpol_store_user_nodemask(policy))
		policy->w.user_nodemask = *nodes;
	else
		policy->w.cpuset_mems_allowed = cpuset_mems_allowed(current);
	return policy;

free:
	kmem_cache_free(policy_cache, policy);
	return ERR_PTR(-EINVAL);
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
			       const nodemask_t *newmask)
{
	nodemask_t tmp;
	int static_nodes;
	int relative_nodes;

	if (!pol)
		return;
	static_nodes = pol->flags & MPOL_F_STATIC_NODES;
	relative_nodes = pol->flags & MPOL_F_RELATIVE_NODES;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		if (static_nodes)
			nodes_and(tmp, pol->w.user_nodemask, *newmask);
		else if (relative_nodes)
			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
					       newmask);
		else {
			nodes_remap(tmp, pol->v.nodes,
				    pol->w.cpuset_mems_allowed, *newmask);
			pol->w.cpuset_mems_allowed = *newmask;
		}
		pol->v.nodes = tmp;
		if (!node_isset(current->il_next, tmp)) {
			current->il_next = next_node(current->il_next, tmp);
			if (current->il_next >= MAX_NUMNODES)
				current->il_next = first_node(tmp);
			if (current->il_next >= MAX_NUMNODES)
				current->il_next = numa_node_id();
		}
		break;
	case MPOL_PREFERRED:
		if (static_nodes) {
			int node = first_node(pol->w.user_nodemask);

			if (node_isset(node, *newmask))
				pol->v.preferred_node = node;
			else
				pol->v.preferred_node = -1;
		} else if (relative_nodes) {
			mpol_relative_nodemask(&tmp, &pol->w.user_nodemask,
					       newmask);
			pol->v.preferred_node = first_node(tmp);
		} else {
			pol->v.preferred_node = node_remap(pol->v.preferred_node,
					pol->w.cpuset_mems_allowed, *newmask);
			pol->w.cpuset_mems_allowed = *newmask;
		}
		break;
	default:
		BUG();
		break;
	}
}
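
/*
 * Illustrative worked example (not part of the original source): a task
 * built an MPOL_INTERLEAVE policy over nodes {0,1} while its cpuset allowed
 * {0,1}.  If the cpuset is moved to {2,3}, the default rebind path above
 * remaps the n-th old node to the n-th new node, so pol->v.nodes becomes
 * {2,3}.  With MPOL_F_STATIC_NODES the saved user mask {0,1} would instead
 * be intersected with the new mems (here, the empty set), and with
 * MPOL_F_RELATIVE_NODES it would be re-applied relative to the new mems,
 * again yielding {2,3}.
 */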

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If the PageReserved would not be checked here then f.e.
		 * the location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & MPOL_MF_STATS)
			gather_stats(page, private, pte_dirty(*pte));
		else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			return ERR_PTR(err);
	}

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_free(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, struct mempolicy *new)
{
	struct vm_area_struct *next;
	int err;

	err = 0;
	for (; vma && vma->vm_start < end; vma = next) {
		next = vma->vm_next;
		if (vma->vm_start < start)
			err = split_vma(vma->vm_mm, vma, start, 1);
		if (!err && vma->vm_end > end)
			err = split_vma(vma->vm_mm, vma, end, 0);
		if (!err)
			err = policy_vma(vma, new);
		if (err)
			break;
	}
	return err;
}
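
/*
 * Illustration (not part of the original source): if one VMA covers
 * [0x1000, 0x9000) and mbind() targets [0x3000, 0x6000), the loop above
 * splits at 0x3000 and then at 0x6000, and policy_vma() installs the new
 * policy only on the middle VMA [0x3000, 0x6000); the two outer pieces
 * keep their previous policy.
 */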

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy. Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new))
		return PTR_ERR(new);
	mpol_free(current->mempolicy);
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->policy == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	return 0;
}

/* Fill a zone bitmap for a policy */
static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	switch (p->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		/* or use current node instead of memory_map? */
		if (p->v.preferred_node < 0)
			*nodes = node_states[N_HIGH_MEMORY];
		else
			node_set(p->v.preferred_node, *nodes);
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	cpuset_update_task_memory_state();
	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		*nmask  = cpuset_current_mems_allowed;
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->policy == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else
		*policy = pol->policy | pol->flags;

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask)
		get_zonemask(pol, nmask);

 out:
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
		isolate_lru_page(page, pagelist);
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_node_page, dest);

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	LIST_HEAD(pagelist);
	int busy = 0;
	int err = 0;
	nodemask_t tmp;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory away from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
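
/*
 * Illustrative worked example (not part of the original source): with
 * from_nodes = {0,1} and to_nodes = {1,2}, the scan first sees s = 0 -> d = 1,
 * but node 1 is still a pending source in tmp, so it keeps looking and finds
 * s = 1 -> d = 2 with node 2 not in tmp.  Pages are therefore moved 1 -> 2
 * first and 0 -> 1 on the next pass, so new pages are not piled onto a node
 * that is still being drained.
 */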

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	down_write(&mm->mmap_sem);
	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(vma, start, end, new);

		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma);

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}

	up_write(&mm->mmap_sem);
	mpol_free(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
			unsigned long mode,
			unsigned long __user *nmask, unsigned long maxnode,
			unsigned flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
		unsigned long maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}
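
/*
 * Illustrative note (not part of the original source): the mode argument of
 * both syscalls above carries the optional mode flags in its high bits, so
 * user space can request, e.g.,
 * set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES, &mask, maxnode);
 * the wrappers split that into mode and flags before calling
 * do_set_mempolicy()/do_mbind().
 */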

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
		const unsigned long __user *old_nodes,
		const unsigned long __user *new_nodes)
{
	struct mm_struct *mm;
	struct task_struct *task;
	nodemask_t old;
	nodemask_t new;
	nodemask_t task_nodes;
	int err;

	err = get_nodes(&old, old_nodes, maxnode);
	if (err)
		return err;

	err = get_nodes(&new, new_nodes, maxnode);
	if (err)
		return err;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	err = do_migrate_pages(mm, &old, &new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
	mmput(mm);
	return err;
}


/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
				unsigned long __user *nmask,
				unsigned long maxnode,
				unsigned long addr, unsigned long flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		err = copy_from_user(bm, nm, alloc_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Returned policy has extra reference count if shared, vma,
 * or some other task's policy [show_numa_maps() can pass
 * @task != current].  It is the caller's responsibility to
 * free the reference in these cases.
 */
static struct mempolicy * get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;
	int shared_pol = 0;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
			shared_pol = 1;	/* if pol non-NULL, add ref below */
		} else if (vma->vm_policy &&
				vma->vm_policy->policy != MPOL_DEFAULT)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	else if (!shared_pol && pol != current->mempolicy)
		mpol_get(pol);	/* vma or other task's policy */
	return pol;
}

/* Return a nodemask representing a mempolicy */
static nodemask_t *nodemask_policy(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->policy == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist representing a mempolicy */
static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
	int nd;

	switch (policy->policy) {
	case MPOL_PREFERRED:
		nd = policy->v.preferred_node;
		if (nd < 0)
			nd = numa_node_id();
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask. However, if __GFP_THISNODE is set and the
		 * current node is not part of the mask, we use the zonelist
		 * for the first node in the mask instead.
		 */
		nd = numa_node_id();
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	case MPOL_INTERLEAVE: /* should not happen */
	case MPOL_DEFAULT:
		nd = numa_node_id();
		break;
	default:
		nd = 0;
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned slab_node(struct mempolicy *policy)
{
	unsigned short pol = policy ? policy->policy : MPOL_DEFAULT;

	switch (pol) {
	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone->node;
	}

	case MPOL_PREFERRED:
		if (policy->v.preferred_node >= 0)
			return policy->v.preferred_node;
		/* Fall through */

	default:
		return numa_node_id();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}
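
/*
 * Illustrative worked example (not part of the original source): with
 * pol->v.nodes = {1,3,5} and off = 7, nnodes is 3 and target = 7 % 3 = 1,
 * so the loop advances to the second node of the mask and returns nid = 3.
 */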
1332
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001333/* Determine a node number for interleave */
1334static inline unsigned interleave_nid(struct mempolicy *pol,
1335 struct vm_area_struct *vma, unsigned long addr, int shift)
1336{
1337 if (vma) {
1338 unsigned long off;
1339
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001340 /*
1341 * for small pages, there is no difference between
1342 * shift and PAGE_SHIFT, so the bit-shift is safe.
1343 * for huge pages, since vm_pgoff is in units of small
1344 * pages, we need to shift off the always 0 bits to get
1345 * a useful offset.
1346 */
1347 BUG_ON(shift < PAGE_SHIFT);
1348 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001349 off += (addr - vma->vm_start) >> shift;
1350 return offset_il_node(pol, vma, off);
1351 } else
1352 return interleave_nodes(pol);
1353}
1354
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001355#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001356/*
1357 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1358 * @vma = virtual memory area whose policy is sought
1359 * @addr = address in @vma for shared policy lookup and interleave policy
1360 * @gfp_flags = for requested zone
Mel Gorman19770b32008-04-28 02:12:18 -07001361 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1362 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001363 *
1364 * Returns a zonelist suitable for a huge page allocation.
Mel Gorman19770b32008-04-28 02:12:18 -07001365 * If the effective policy is 'BIND, returns pointer to local node's zonelist,
1366 * and a pointer to the mempolicy's @nodemask for filtering the zonelist.
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001367 * If it is also a policy for which get_vma_policy() returns an extra
Mel Gorman19770b32008-04-28 02:12:18 -07001368 * reference, we must hold that reference until after the allocation.
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001369 * In that case, return policy via @mpol so hugetlb allocation can drop
Mel Gorman19770b32008-04-28 02:12:18 -07001370 * the reference. For non-'BIND referenced policies, we can/do drop the
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001371 * reference here, so the caller doesn't need to know about the special case
1372 * for default and current task policy.
1373 */
Mel Gorman396faf02007-07-17 04:03:13 -07001374struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001375 gfp_t gfp_flags, struct mempolicy **mpol,
1376 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001377{
1378 struct mempolicy *pol = get_vma_policy(current, vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001379 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001380
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001381 *mpol = NULL; /* probably no unref needed */
Mel Gorman19770b32008-04-28 02:12:18 -07001382 *nodemask = NULL; /* assume !MPOL_BIND */
1383 if (pol->policy == MPOL_BIND) {
1384 *nodemask = &pol->v.nodes;
1385 } else if (pol->policy == MPOL_INTERLEAVE) {
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001386 unsigned nid;
1387
1388 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
Lee Schermerhorn69682d82008-03-10 11:43:45 -07001389 if (unlikely(pol != &default_policy &&
1390 pol != current->mempolicy))
1391 __mpol_free(pol); /* finished with pol */
Mel Gorman0e884602008-04-28 02:12:14 -07001392 return node_zonelist(nid, gfp_flags);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001393 }
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001394
1395 zl = zonelist_policy(GFP_HIGHUSER, pol);
1396 if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
1397 if (pol->policy != MPOL_BIND)
1398 __mpol_free(pol); /* finished with pol */
1399 else
1400 *mpol = pol; /* unref needed after allocation */
1401 }
1402 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001403}
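/*
 * A sketch of the caller pattern implied by the contract documented
 * above (local variable names are illustrative only):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl;
 *
 *	zl = huge_zonelist(vma, addr, gfp_flags, &mpol, &nodemask);
 *	... allocate the huge page from zl, filtered by nodemask ...
 *	mpol_free(mpol);		(mpol_free() ignores NULL)
 *
 * The reference returned via @mpol must not be dropped until the
 * allocation is finished, because @nodemask may point into that policy.
 */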
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001404#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001405
1406/* Allocate a page under the interleave policy.
1407 Uses its own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001408static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1409 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410{
1411 struct zonelist *zl;
1412 struct page *page;
1413
Mel Gorman0e884602008-04-28 02:12:14 -07001414 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001416 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001417 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 return page;
1419}
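/*
 * Note on the accounting above: NUMA_INTERLEAVE_HIT is only bumped when
 * the returned page came from the first (preferred) zone of the target
 * node's zonelist, i.e. when the allocator did not have to fall back to
 * another zone or node.
 */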
1420
1421/**
1422 * alloc_page_vma - Allocate a page for a VMA.
1423 *
1424 * @gfp:
1425 * %GFP_USER user allocation.
1426 * %GFP_KERNEL kernel allocation.
1427 * %GFP_HIGHMEM highmem/user allocation.
1428 * %GFP_FS allocation should not call back into a file system.
1429 * %GFP_ATOMIC don't sleep.
1430 *
1431 * @vma: Pointer to VMA or NULL if not available.
1432 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1433 *
1434 * This function allocates a page from the kernel page pool and applies
1435 * a NUMA policy associated with the VMA or the current process.
1436 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1437 * mm_struct of the VMA to prevent it from going away. Should be used for
1438 * all allocations for pages that will be mapped into
1439 * user space. Returns NULL when no page can be allocated.
1440 *
1441 * Should be called with the mmap_sem of the vma held.
1442 */
1443struct page *
Al Virodd0fc662005-10-07 07:46:04 +01001444alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001446 struct mempolicy *pol = get_vma_policy(current, vma, addr);
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001447 struct zonelist *zl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001449 cpuset_update_task_memory_state();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
1451 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1452 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001453
1454 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
Lee Schermerhorn69682d82008-03-10 11:43:45 -07001455 if (unlikely(pol != &default_policy &&
1456 pol != current->mempolicy))
1457 __mpol_free(pol); /* finished with pol */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 return alloc_page_interleave(gfp, 0, nid);
1459 }
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001460 zl = zonelist_policy(gfp, pol);
1461 if (pol != &default_policy && pol != current->mempolicy) {
1462 /*
1463 * slow path: ref counted policy -- shared or vma
1464 */
Mel Gorman19770b32008-04-28 02:12:18 -07001465 struct page *page = __alloc_pages_nodemask(gfp, 0,
1466 zl, nodemask_policy(gfp, pol));
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001467 __mpol_free(pol);
1468 return page;
1469 }
1470 /*
1471 * fast path: default or task policy
1472 */
Mel Gorman19770b32008-04-28 02:12:18 -07001473 return __alloc_pages_nodemask(gfp, 0, zl, nodemask_policy(gfp, pol));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474}
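/*
 * Typical use (a sketch; the gfp mask and call site are illustrative):
 * the anonymous fault path allocates user pages through this helper so
 * the VMA's policy is honoured, e.g.
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO,
 *			      vma, address);
 *
 * with mmap_sem held for read, as required by the comment above.
 */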
1475
1476/**
1477 * alloc_pages_current - Allocate pages.
1478 *
1479 * @gfp:
1480 * %GFP_USER user allocation,
1481 * %GFP_KERNEL kernel allocation,
1482 * %GFP_HIGHMEM highmem allocation,
1483 * %GFP_FS don't call back into a file system.
1484 * %GFP_ATOMIC don't sleep.
1485 * @order: Power of two of allocation size in pages. 0 is a single page.
1486 *
1487 * Allocate a page from the kernel page pool. When not in
1488 * interrupt context, apply the current process' NUMA policy.
1489 * Returns NULL when no page can be allocated.
1490 *
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001491 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 * 1) it's ok to take cpuset_sem (can WAIT), and
1493 * 2) allocating for current task (not interrupt).
1494 */
Al Virodd0fc662005-10-07 07:46:04 +01001495struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496{
1497 struct mempolicy *pol = current->mempolicy;
1498
1499 if ((gfp & __GFP_WAIT) && !in_interrupt())
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001500 cpuset_update_task_memory_state();
Christoph Lameter9b819d22006-09-25 23:31:40 -07001501 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 pol = &default_policy;
1503 if (pol->policy == MPOL_INTERLEAVE)
1504 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
Mel Gorman19770b32008-04-28 02:12:18 -07001505 return __alloc_pages_nodemask(gfp, order,
1506 zonelist_policy(gfp, pol), nodemask_policy(gfp, pol));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507}
1508EXPORT_SYMBOL(alloc_pages_current);
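/*
 * On CONFIG_NUMA kernels the generic alloc_pages()/alloc_page() wrappers
 * resolve to alloc_pages_current(), which is why interrupt context and
 * __GFP_THISNODE requests are explicitly steered to default_policy above.
 */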
1509
Paul Jackson42253992006-01-08 01:01:59 -08001510/*
1511 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
1512 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1513 * with the mems_allowed returned by cpuset_mems_allowed(). This
1514 * keeps mempolicies cpuset-relative after their cpuset moves. See
1515 * also update_nodemask() in kernel/cpuset.c.
1516 */
Paul Jackson42253992006-01-08 01:01:59 -08001517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518/* Slow path of a mempolicy copy */
1519struct mempolicy *__mpol_copy(struct mempolicy *old)
1520{
1521 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1522
1523 if (!new)
1524 return ERR_PTR(-ENOMEM);
Paul Jackson42253992006-01-08 01:01:59 -08001525 if (current_cpuset_is_being_rebound()) {
1526 nodemask_t mems = cpuset_mems_allowed(current);
1527 mpol_rebind_policy(old, &mems);
1528 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 *new = *old;
1530 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 return new;
1532}
1533
David Rientjesf5b087b2008-04-28 02:12:27 -07001534static int mpol_match_intent(const struct mempolicy *a,
1535 const struct mempolicy *b)
1536{
1537 if (a->flags != b->flags)
1538 return 0;
1539 if (!mpol_store_user_nodemask(a))
1540 return 1;
1541 return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1542}
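/*
 * Two policies compare equal here only if their mode flags match and,
 * when a user nodemask was stored for later rebinding (the
 * MPOL_F_STATIC_NODES / MPOL_F_RELATIVE_NODES cases), the stored masks
 * are equal as well.
 */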
1543
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544/* Slow path of a mempolicy comparison */
1545int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1546{
1547 if (!a || !b)
1548 return 0;
1549 if (a->policy != b->policy)
1550 return 0;
David Rientjesf5b087b2008-04-28 02:12:27 -07001551 if (a->policy != MPOL_DEFAULT && !mpol_match_intent(a, b))
1552 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 switch (a->policy) {
1554 case MPOL_DEFAULT:
1555 return 1;
Mel Gorman19770b32008-04-28 02:12:18 -07001556 case MPOL_BIND:
1557 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -07001559 return nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 case MPOL_PREFERRED:
1561 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 default:
1563 BUG();
1564 return 0;
1565 }
1566}
1567
1568/* Slow path of an mpol destructor. */
1569void __mpol_free(struct mempolicy *p)
1570{
1571 if (!atomic_dec_and_test(&p->refcnt))
1572 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573 p->policy = MPOL_DEFAULT;
1574 kmem_cache_free(policy_cache, p);
1575}
1576
1577/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 * Shared memory backing store policy support.
1579 *
1580 * Remember policies even when nobody has shared memory mapped.
1581 * The policies are kept in Red-Black tree linked from the inode.
1582 * They are protected by the sp->lock spinlock, which should be held
1583 * for any accesses to the tree.
1584 */
1585
1586/* Look up the first element intersecting start-end */
1587/* Caller holds sp->lock */
1588static struct sp_node *
1589sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1590{
1591 struct rb_node *n = sp->root.rb_node;
1592
1593 while (n) {
1594 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1595
1596 if (start >= p->end)
1597 n = n->rb_right;
1598 else if (end <= p->start)
1599 n = n->rb_left;
1600 else
1601 break;
1602 }
1603 if (!n)
1604 return NULL;
1605 for (;;) {
1606 struct sp_node *w = NULL;
1607 struct rb_node *prev = rb_prev(n);
1608 if (!prev)
1609 break;
1610 w = rb_entry(prev, struct sp_node, nd);
1611 if (w->end <= start)
1612 break;
1613 n = prev;
1614 }
1615 return rb_entry(n, struct sp_node, nd);
1616}
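/*
 * The loop above widens the match to the left: starting from any node
 * that intersects [start, end), it walks rb_prev() until the previous
 * range no longer overlaps, so the node returned is the lowest-starting
 * policy that still intersects the requested range.
 */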
1617
1618/* Insert a new shared policy into the tree. */
1619/* Caller holds sp->lock */
1620static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1621{
1622 struct rb_node **p = &sp->root.rb_node;
1623 struct rb_node *parent = NULL;
1624 struct sp_node *nd;
1625
1626 while (*p) {
1627 parent = *p;
1628 nd = rb_entry(parent, struct sp_node, nd);
1629 if (new->start < nd->start)
1630 p = &(*p)->rb_left;
1631 else if (new->end > nd->end)
1632 p = &(*p)->rb_right;
1633 else
1634 BUG();
1635 }
1636 rb_link_node(&new->nd, parent, p);
1637 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07001638 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639 new->policy ? new->policy->policy : 0);
1640}
1641
1642/* Find shared policy intersecting idx */
1643struct mempolicy *
1644mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1645{
1646 struct mempolicy *pol = NULL;
1647 struct sp_node *sn;
1648
1649 if (!sp->root.rb_node)
1650 return NULL;
1651 spin_lock(&sp->lock);
1652 sn = sp_lookup(sp, idx, idx+1);
1653 if (sn) {
1654 mpol_get(sn->policy);
1655 pol = sn->policy;
1656 }
1657 spin_unlock(&sp->lock);
1658 return pol;
1659}
1660
1661static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1662{
1663 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 rb_erase(&n->nd, &sp->root);
1665 mpol_free(n->policy);
1666 kmem_cache_free(sn_cache, n);
1667}
1668
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001669static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1670 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671{
1672 struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1673
1674 if (!n)
1675 return NULL;
1676 n->start = start;
1677 n->end = end;
1678 mpol_get(pol);
1679 n->policy = pol;
1680 return n;
1681}
1682
1683/* Replace a policy range. */
1684static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1685 unsigned long end, struct sp_node *new)
1686{
1687 struct sp_node *n, *new2 = NULL;
1688
1689restart:
1690 spin_lock(&sp->lock);
1691 n = sp_lookup(sp, start, end);
1692 /* Take care of old policies in the same range. */
1693 while (n && n->start < end) {
1694 struct rb_node *next = rb_next(&n->nd);
1695 if (n->start >= start) {
1696 if (n->end <= end)
1697 sp_delete(sp, n);
1698 else
1699 n->start = end;
1700 } else {
1701 /* Old policy spanning whole new range. */
1702 if (n->end > end) {
1703 if (!new2) {
1704 spin_unlock(&sp->lock);
1705 new2 = sp_alloc(end, n->end, n->policy);
1706 if (!new2)
1707 return -ENOMEM;
1708 goto restart;
1709 }
1710 n->end = start;
1711 sp_insert(sp, new2);
1712 new2 = NULL;
1713 break;
1714 } else
1715 n->end = start;
1716 }
1717 if (!next)
1718 break;
1719 n = rb_entry(next, struct sp_node, nd);
1720 }
1721 if (new)
1722 sp_insert(sp, new);
1723 spin_unlock(&sp->lock);
1724 if (new2) {
1725 mpol_free(new2->policy);
1726 kmem_cache_free(sn_cache, new2);
1727 }
1728 return 0;
1729}
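/*
 * Note on the restart above: sp_alloc() allocates with GFP_KERNEL and
 * may sleep, so it cannot be called under sp->lock.  When an old range
 * must be split, the lock is dropped, the second half is allocated, and
 * the lookup is redone from scratch in case the tree changed while the
 * lock was not held.
 */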
1730
David Rientjesa3b51e02008-04-28 02:12:23 -07001731void mpol_shared_policy_init(struct shared_policy *info, unsigned short policy,
David Rientjes028fec42008-04-28 02:12:25 -07001732 unsigned short flags, nodemask_t *policy_nodes)
Robin Holt7339ff82006-01-14 13:20:48 -08001733{
1734 info->root = RB_ROOT;
1735 spin_lock_init(&info->lock);
1736
1737 if (policy != MPOL_DEFAULT) {
1738 struct mempolicy *newpol;
1739
1740 /* Falls back to MPOL_DEFAULT on any error */
David Rientjes028fec42008-04-28 02:12:25 -07001741 newpol = mpol_new(policy, flags, policy_nodes);
Robin Holt7339ff82006-01-14 13:20:48 -08001742 if (!IS_ERR(newpol)) {
1743 /* Create pseudo-vma that contains just the policy */
1744 struct vm_area_struct pvma;
1745
1746 memset(&pvma, 0, sizeof(struct vm_area_struct));
1747 /* Policy covers entire file */
1748 pvma.vm_end = TASK_SIZE;
1749 mpol_set_shared_policy(info, &pvma, newpol);
1750 mpol_free(newpol);
1751 }
1752 }
1753}
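/*
 * This initializer is intended for shared memory backing objects; tmpfs,
 * for instance, calls it when setting up a new inode so that a mount-time
 * or inherited policy covers the whole file (hence the pseudo-vma
 * spanning offset 0 to TASK_SIZE above).
 */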
1754
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755int mpol_set_shared_policy(struct shared_policy *info,
1756 struct vm_area_struct *vma, struct mempolicy *npol)
1757{
1758 int err;
1759 struct sp_node *new = NULL;
1760 unsigned long sz = vma_pages(vma);
1761
David Rientjes028fec42008-04-28 02:12:25 -07001762 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 vma->vm_pgoff,
David Rientjes028fec42008-04-28 02:12:25 -07001764 sz, npol ? npol->policy : -1,
1765 npol ? npol->flags : -1,
Paul Mundt140d5a42007-07-15 23:38:16 -07001766 npol ? nodes_addr(npol->v.nodes)[0] : -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
1768 if (npol) {
1769 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1770 if (!new)
1771 return -ENOMEM;
1772 }
1773 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1774 if (err && new)
1775 kmem_cache_free(sn_cache, new);
1776 return err;
1777}
1778
1779/* Free a backing policy store on inode delete. */
1780void mpol_free_shared_policy(struct shared_policy *p)
1781{
1782 struct sp_node *n;
1783 struct rb_node *next;
1784
1785 if (!p->root.rb_node)
1786 return;
1787 spin_lock(&p->lock);
1788 next = rb_first(&p->root);
1789 while (next) {
1790 n = rb_entry(next, struct sp_node, nd);
1791 next = rb_next(&n->nd);
Andi Kleen90c50292005-07-27 11:43:50 -07001792 rb_erase(&n->nd, &p->root);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 mpol_free(n->policy);
1794 kmem_cache_free(sn_cache, n);
1795 }
1796 spin_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797}
1798
1799/* assumes fs == KERNEL_DS */
1800void __init numa_policy_init(void)
1801{
Paul Mundtb71636e2007-07-15 23:38:15 -07001802 nodemask_t interleave_nodes;
1803 unsigned long largest = 0;
1804 int nid, prefer = 0;
1805
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 policy_cache = kmem_cache_create("numa_policy",
1807 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09001808 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
1810 sn_cache = kmem_cache_create("shared_policy_node",
1811 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09001812 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
Paul Mundtb71636e2007-07-15 23:38:15 -07001814 /*
1815 * Set interleaving policy for system init. Interleaving is only
1816 * enabled across suitably sized nodes (default is >= 16MB); if they
1817 * are all smaller, fall back to the largest node.
1818 */
1819 nodes_clear(interleave_nodes);
Christoph Lameter56bbd652007-10-16 01:25:35 -07001820 for_each_node_state(nid, N_HIGH_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07001821 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
Paul Mundtb71636e2007-07-15 23:38:15 -07001823 /* Preserve the largest node */
1824 if (largest < total_pages) {
1825 largest = total_pages;
1826 prefer = nid;
1827 }
1828
1829 /* Interleave this node? */
1830 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1831 node_set(nid, interleave_nodes);
1832 }
1833
1834 /* All too small, use the largest */
1835 if (unlikely(nodes_empty(interleave_nodes)))
1836 node_set(prefer, interleave_nodes);
1837
David Rientjes028fec42008-04-28 02:12:25 -07001838 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1839 printk(KERN_ERR "numa_policy_init: interleaving failed\n");
1840}
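/*
 * Example: on a machine with three nodes holding 8 MB, 512 MB and 2 GB
 * of memory, interleave_nodes ends up as {1,2} (node 0 is below the
 * 16 MB cutoff); if every node were under 16 MB, only the single largest
 * node would be used for the boot-time interleave policy.
 */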
1841
Christoph Lameter8bccd852005-10-29 18:16:59 -07001842/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843void numa_default_policy(void)
1844{
David Rientjes028fec42008-04-28 02:12:25 -07001845 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846}
Paul Jackson68860ec2005-10-30 15:02:36 -08001847
Paul Jackson42253992006-01-08 01:01:59 -08001848/*
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001849 * Display pages allocated per node and memory policy via /proc.
1850 */
1851
Helge Deller15ad7cd2006-12-06 20:40:36 -08001852static const char * const policy_types[] =
1853 { "default", "prefer", "bind", "interleave" };
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001854
1855/*
1856 * Convert a mempolicy into a string.
1857 * Returns the number of characters in buffer (if positive)
1858 * or an error (negative)
1859 */
1860static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1861{
1862 char *p = buffer;
1863 int l;
1864 nodemask_t nodes;
David Rientjesa3b51e02008-04-28 02:12:23 -07001865 unsigned short mode = pol ? pol->policy : MPOL_DEFAULT;
David Rientjesf5b087b2008-04-28 02:12:27 -07001866 unsigned short flags = pol ? pol->flags : 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001867
1868 switch (mode) {
1869 case MPOL_DEFAULT:
1870 nodes_clear(nodes);
1871 break;
1872
1873 case MPOL_PREFERRED:
1874 nodes_clear(nodes);
1875 node_set(pol->v.preferred_node, nodes);
1876 break;
1877
1878 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001879 /* Fall through */
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001880 case MPOL_INTERLEAVE:
1881 nodes = pol->v.nodes;
1882 break;
1883
1884 default:
1885 BUG();
1886 return -EFAULT;
1887 }
1888
1889 l = strlen(policy_types[mode]);
1890 if (buffer + maxlen < p + l + 1)
1891 return -ENOSPC;
1892
1893 strcpy(p, policy_types[mode]);
1894 p += l;
1895
David Rientjesf5b087b2008-04-28 02:12:27 -07001896 if (flags) {
1897 int need_bar = 0;
1898
1899 if (buffer + maxlen < p + 2)
1900 return -ENOSPC;
1901 *p++ = '=';
1902
1903 if (flags & MPOL_F_STATIC_NODES)
1904 p += sprintf(p, "%sstatic", need_bar++ ? "|" : "");
David Rientjes4c50bc02008-04-28 02:12:30 -07001905 if (flags & MPOL_F_RELATIVE_NODES)
1906 p += sprintf(p, "%srelative", need_bar++ ? "|" : "");
David Rientjesf5b087b2008-04-28 02:12:27 -07001907 }
1908
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001909 if (!nodes_empty(nodes)) {
1910 if (buffer + maxlen < p + 2)
1911 return -ENOSPC;
1912 *p++ = '=';
1913 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
1914 }
1915 return p - buffer;
1916}
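/*
 * Resulting strings look like "default", "interleave=0-3" or, with a
 * mode flag set, "prefer=static=2": the policy name, then "=<flags>" if
 * any mode flags are set, then "=<nodelist>" if the relevant nodemask is
 * non-empty.
 */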
1917
1918struct numa_maps {
1919 unsigned long pages;
1920 unsigned long anon;
Christoph Lameter397874d2006-03-06 15:42:53 -08001921 unsigned long active;
1922 unsigned long writeback;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001923 unsigned long mapcount_max;
Christoph Lameter397874d2006-03-06 15:42:53 -08001924 unsigned long dirty;
1925 unsigned long swapcache;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001926 unsigned long node[MAX_NUMNODES];
1927};
1928
Christoph Lameter397874d2006-03-06 15:42:53 -08001929static void gather_stats(struct page *page, void *private, int pte_dirty)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001930{
1931 struct numa_maps *md = private;
1932 int count = page_mapcount(page);
1933
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001934 md->pages++;
Christoph Lameter397874d2006-03-06 15:42:53 -08001935 if (pte_dirty || PageDirty(page))
1936 md->dirty++;
1937
1938 if (PageSwapCache(page))
1939 md->swapcache++;
1940
1941 if (PageActive(page))
1942 md->active++;
1943
1944 if (PageWriteback(page))
1945 md->writeback++;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001946
1947 if (PageAnon(page))
1948 md->anon++;
1949
Christoph Lameter397874d2006-03-06 15:42:53 -08001950 if (count > md->mapcount_max)
1951 md->mapcount_max = count;
1952
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001953 md->node[page_to_nid(page)]++;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001954}
1955
Andrew Morton7f709ed2006-03-07 21:55:22 -08001956#ifdef CONFIG_HUGETLB_PAGE
Christoph Lameter397874d2006-03-06 15:42:53 -08001957static void check_huge_range(struct vm_area_struct *vma,
1958 unsigned long start, unsigned long end,
1959 struct numa_maps *md)
1960{
1961 unsigned long addr;
1962 struct page *page;
1963
1964 for (addr = start; addr < end; addr += HPAGE_SIZE) {
1965 pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
1966 pte_t pte;
1967
1968 if (!ptep)
1969 continue;
1970
1971 pte = *ptep;
1972 if (pte_none(pte))
1973 continue;
1974
1975 page = pte_page(pte);
1976 if (!page)
1977 continue;
1978
1979 gather_stats(page, md, pte_dirty(*ptep));
1980 }
1981}
Andrew Morton7f709ed2006-03-07 21:55:22 -08001982#else
1983static inline void check_huge_range(struct vm_area_struct *vma,
1984 unsigned long start, unsigned long end,
1985 struct numa_maps *md)
1986{
1987}
1988#endif
Christoph Lameter397874d2006-03-06 15:42:53 -08001989
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001990int show_numa_map(struct seq_file *m, void *v)
1991{
Eric W. Biederman99f89552006-06-26 00:25:55 -07001992 struct proc_maps_private *priv = m->private;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001993 struct vm_area_struct *vma = v;
1994 struct numa_maps *md;
Christoph Lameter397874d2006-03-06 15:42:53 -08001995 struct file *file = vma->vm_file;
1996 struct mm_struct *mm = vma->vm_mm;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001997 struct mempolicy *pol;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08001998 int n;
1999 char buffer[50];
2000
Christoph Lameter397874d2006-03-06 15:42:53 -08002001 if (!mm)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002002 return 0;
2003
2004 md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2005 if (!md)
2006 return 0;
2007
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07002008 pol = get_vma_policy(priv->task, vma, vma->vm_start);
2009 mpol_to_str(buffer, sizeof(buffer), pol);
2010 /*
2011 * unref shared or other task's mempolicy
2012 */
2013 if (pol != &default_policy && pol != current->mempolicy)
2014 __mpol_free(pol);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002015
Christoph Lameter397874d2006-03-06 15:42:53 -08002016 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002017
Christoph Lameter397874d2006-03-06 15:42:53 -08002018 if (file) {
2019 seq_printf(m, " file=");
Jan Blunckc32c2f62008-02-14 19:38:43 -08002020 seq_path(m, &file->f_path, "\n\t= ");
Christoph Lameter397874d2006-03-06 15:42:53 -08002021 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2022 seq_printf(m, " heap");
2023 } else if (vma->vm_start <= mm->start_stack &&
2024 vma->vm_end >= mm->start_stack) {
2025 seq_printf(m, " stack");
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002026 }
Christoph Lameter397874d2006-03-06 15:42:53 -08002027
2028 if (is_vm_hugetlb_page(vma)) {
2029 check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2030 seq_printf(m, " huge");
2031 } else {
2032 check_pgd_range(vma, vma->vm_start, vma->vm_end,
Christoph Lameter56bbd652007-10-16 01:25:35 -07002033 &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
Christoph Lameter397874d2006-03-06 15:42:53 -08002034 }
2035
2036 if (!md->pages)
2037 goto out;
2038
2039 if (md->anon)
2040 seq_printf(m," anon=%lu",md->anon);
2041
2042 if (md->dirty)
2043 seq_printf(m," dirty=%lu",md->dirty);
2044
2045 if (md->pages != md->anon && md->pages != md->dirty)
2046 seq_printf(m, " mapped=%lu", md->pages);
2047
2048 if (md->mapcount_max > 1)
2049 seq_printf(m, " mapmax=%lu", md->mapcount_max);
2050
2051 if (md->swapcache)
2052 seq_printf(m," swapcache=%lu", md->swapcache);
2053
2054 if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2055 seq_printf(m," active=%lu", md->active);
2056
2057 if (md->writeback)
2058 seq_printf(m," writeback=%lu", md->writeback);
2059
Christoph Lameter56bbd652007-10-16 01:25:35 -07002060 for_each_node_state(n, N_HIGH_MEMORY)
Christoph Lameter397874d2006-03-06 15:42:53 -08002061 if (md->node[n])
2062 seq_printf(m, " N%d=%lu", n, md->node[n]);
2063out:
2064 seq_putc(m, '\n');
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002065 kfree(md);
2066
2067 if (m->count < m->size)
Eric W. Biederman99f89552006-06-26 00:25:55 -07002068 m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002069 return 0;
2070}
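/*
 * A line in /proc/<pid>/numa_maps produced by this function looks
 * roughly like (illustrative values, printed as a single line):
 *
 *	2aaaaac00000 interleave=0-3 file=/lib/libc.so anon=2 dirty=2
 *		mapped=104 mapmax=3 N0=26 N1=26 N2=26 N3=26
 *
 * i.e. the VMA start address, the policy string from mpol_to_str(), an
 * optional file/heap/stack tag, and the per-node page counts gathered
 * above.
 */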