/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Four policies are supported, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind truly restricted
 *                the allocation to the set of memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

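/*
 * Illustrative sketch (not part of this file): how the four policies are
 * requested from user space through the set_mempolicy()/mbind() wrappers
 * in libnuma's <numaif.h>.  The function and buffer names below are made
 * up for the example; error handling is minimal.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	static int numa_policy_example(void)
 *	{
 *		unsigned long mask = 0x3;		// nodes 0 and 1
 *		size_t len = 1 << 20;
 *		void *buf;
 *
 *		// Process policy: interleave future allocations over nodes 0-1.
 *		if (set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask)))
 *			return -1;
 *
 *		// VMA policy: bind one anonymous mapping to node 0 only.
 *		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return -1;
 *		mask = 0x1;
 *		return mbind(buf, len, MPOL_BIND, &mask, 8 * sizeof(mask), 0);
 *	}
 */
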
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
   could replace all the switch()es with a mempolicy_ops structure.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)		/* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

#define PDprintk(fmt...)

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = ZONE_DMA;

struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.policy = MPOL_DEFAULT,
};

Linus Torvalds1da177e2005-04-16 15:20:36 -0700115/* Do sanity checking on a policy */
Andi Kleendfcd3c02005-10-29 18:15:48 -0700116static int mpol_check_policy(int mode, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117{
Andi Kleendfcd3c02005-10-29 18:15:48 -0700118 int empty = nodes_empty(*nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
120 switch (mode) {
121 case MPOL_DEFAULT:
122 if (!empty)
123 return -EINVAL;
124 break;
125 case MPOL_BIND:
126 case MPOL_INTERLEAVE:
127 /* Preferred will only use the first bit, but allow
128 more for now. */
129 if (empty)
130 return -EINVAL;
131 break;
132 }
Andi Kleendfcd3c02005-10-29 18:15:48 -0700133 return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134}
Andi Kleendd942ae2006-02-17 01:39:16 +0100135
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136/* Generate a custom zonelist for the BIND policy. */
Andi Kleendfcd3c02005-10-29 18:15:48 -0700137static struct zonelist *bind_zonelist(nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138{
139 struct zonelist *zl;
Christoph Lameter2f6726e2006-09-25 23:31:18 -0700140 int num, max, nd;
141 enum zone_type k;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142
Andi Kleendfcd3c02005-10-29 18:15:48 -0700143 max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
Andi Kleendd942ae2006-02-17 01:39:16 +0100144 zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145 if (!zl)
146 return NULL;
147 num = 0;
Andi Kleendd942ae2006-02-17 01:39:16 +0100148 /* First put in the highest zones from all nodes, then all the next
149 lower zones etc. Avoid empty zones because the memory allocator
150 doesn't like them. If you implement node hot removal you
151 have to fix that. */
Christoph Lameter2f6726e2006-09-25 23:31:18 -0700152 k = policy_zone;
153 while (1) {
Andi Kleendd942ae2006-02-17 01:39:16 +0100154 for_each_node_mask(nd, *nodes) {
155 struct zone *z = &NODE_DATA(nd)->node_zones[k];
156 if (z->present_pages > 0)
157 zl->zones[num++] = z;
158 }
Christoph Lameter2f6726e2006-09-25 23:31:18 -0700159 if (k == 0)
160 break;
161 k--;
Andi Kleendd942ae2006-02-17 01:39:16 +0100162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163 zl->zones[num] = NULL;
164 return zl;
165}
166
167/* Create a new policy */
Andi Kleendfcd3c02005-10-29 18:15:48 -0700168static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169{
170 struct mempolicy *policy;
171
Andi Kleendfcd3c02005-10-29 18:15:48 -0700172 PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 if (mode == MPOL_DEFAULT)
174 return NULL;
175 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
176 if (!policy)
177 return ERR_PTR(-ENOMEM);
178 atomic_set(&policy->refcnt, 1);
179 switch (mode) {
180 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700181 policy->v.nodes = *nodes;
Andi Kleen8f493d72006-01-03 00:07:28 +0100182 if (nodes_weight(*nodes) == 0) {
183 kmem_cache_free(policy_cache, policy);
184 return ERR_PTR(-EINVAL);
185 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186 break;
187 case MPOL_PREFERRED:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700188 policy->v.preferred_node = first_node(*nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 if (policy->v.preferred_node >= MAX_NUMNODES)
190 policy->v.preferred_node = -1;
191 break;
192 case MPOL_BIND:
193 policy->v.zonelist = bind_zonelist(nodes);
194 if (policy->v.zonelist == NULL) {
195 kmem_cache_free(policy_cache, policy);
196 return ERR_PTR(-ENOMEM);
197 }
198 break;
199 }
200 policy->policy = mode;
Paul Jackson74cb2152006-01-08 01:01:56 -0800201 policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 return policy;
203}
204
Christoph Lameter397874d2006-03-06 15:42:53 -0800205static void gather_stats(struct page *, void *, int pte_dirty);
Christoph Lameterfc301282006-01-18 17:42:29 -0800206static void migrate_page_add(struct page *page, struct list_head *pagelist,
207 unsigned long flags);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -0800208
Christoph Lameter38e35862006-01-08 01:01:01 -0800209/* Scan through pages checking if pages follow certain conditions. */
Nick Pigginb5810032005-10-29 18:16:12 -0700210static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800211 unsigned long addr, unsigned long end,
212 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800213 void *private)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214{
Hugh Dickins91612e02005-06-21 17:15:07 -0700215 pte_t *orig_pte;
216 pte_t *pte;
Hugh Dickins705e87c2005-10-29 18:16:27 -0700217 spinlock_t *ptl;
Hugh Dickins941150a2005-06-21 17:15:06 -0700218
Hugh Dickins705e87c2005-10-29 18:16:27 -0700219 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
Hugh Dickins91612e02005-06-21 17:15:07 -0700220 do {
Linus Torvalds6aab3412005-11-28 14:34:23 -0800221 struct page *page;
Hugh Dickins91612e02005-06-21 17:15:07 -0700222 unsigned int nid;
223
224 if (!pte_present(*pte))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225 continue;
Linus Torvalds6aab3412005-11-28 14:34:23 -0800226 page = vm_normal_page(vma, addr, *pte);
227 if (!page)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 continue;
		/*
		 * The check for PageReserved here is important to avoid
		 * handling zero pages and other pages that may have been
		 * marked special by the system.
		 *
		 * If PageReserved were not checked here then e.g. the
		 * location of the zero page could have an influence
		 * on MPOL_MF_STRICT, zero pages would be counted for
		 * the per node stats, and there would be useless attempts
		 * to put zero pages on the migration list.
		 */
Christoph Lameterf4598c82006-01-12 01:05:20 -0800240 if (PageReserved(page))
241 continue;
Linus Torvalds6aab3412005-11-28 14:34:23 -0800242 nid = page_to_nid(page);
Christoph Lameter38e35862006-01-08 01:01:01 -0800243 if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
244 continue;
245
Christoph Lameter1a75a6c2006-01-08 01:01:02 -0800246 if (flags & MPOL_MF_STATS)
Christoph Lameter397874d2006-03-06 15:42:53 -0800247 gather_stats(page, private, pte_dirty(*pte));
Nick Piggin053837f2006-01-18 17:42:27 -0800248 else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
Christoph Lameterfc301282006-01-18 17:42:29 -0800249 migrate_page_add(page, private, flags);
Christoph Lameter38e35862006-01-08 01:01:01 -0800250 else
251 break;
Hugh Dickins91612e02005-06-21 17:15:07 -0700252 } while (pte++, addr += PAGE_SIZE, addr != end);
Hugh Dickins705e87c2005-10-29 18:16:27 -0700253 pte_unmap_unlock(orig_pte, ptl);
Hugh Dickins91612e02005-06-21 17:15:07 -0700254 return addr != end;
255}
256
Nick Pigginb5810032005-10-29 18:16:12 -0700257static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800258 unsigned long addr, unsigned long end,
259 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800260 void *private)
Hugh Dickins91612e02005-06-21 17:15:07 -0700261{
262 pmd_t *pmd;
263 unsigned long next;
264
265 pmd = pmd_offset(pud, addr);
266 do {
267 next = pmd_addr_end(addr, end);
268 if (pmd_none_or_clear_bad(pmd))
269 continue;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800270 if (check_pte_range(vma, pmd, addr, next, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800271 flags, private))
Hugh Dickins91612e02005-06-21 17:15:07 -0700272 return -EIO;
273 } while (pmd++, addr = next, addr != end);
274 return 0;
275}
276
Nick Pigginb5810032005-10-29 18:16:12 -0700277static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800278 unsigned long addr, unsigned long end,
279 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800280 void *private)
Hugh Dickins91612e02005-06-21 17:15:07 -0700281{
282 pud_t *pud;
283 unsigned long next;
284
285 pud = pud_offset(pgd, addr);
286 do {
287 next = pud_addr_end(addr, end);
288 if (pud_none_or_clear_bad(pud))
289 continue;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800290 if (check_pmd_range(vma, pud, addr, next, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800291 flags, private))
Hugh Dickins91612e02005-06-21 17:15:07 -0700292 return -EIO;
293 } while (pud++, addr = next, addr != end);
294 return 0;
295}
296
Nick Pigginb5810032005-10-29 18:16:12 -0700297static inline int check_pgd_range(struct vm_area_struct *vma,
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800298 unsigned long addr, unsigned long end,
299 const nodemask_t *nodes, unsigned long flags,
Christoph Lameter38e35862006-01-08 01:01:01 -0800300 void *private)
Hugh Dickins91612e02005-06-21 17:15:07 -0700301{
302 pgd_t *pgd;
303 unsigned long next;
304
Nick Pigginb5810032005-10-29 18:16:12 -0700305 pgd = pgd_offset(vma->vm_mm, addr);
Hugh Dickins91612e02005-06-21 17:15:07 -0700306 do {
307 next = pgd_addr_end(addr, end);
308 if (pgd_none_or_clear_bad(pgd))
309 continue;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800310 if (check_pud_range(vma, pgd, addr, next, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800311 flags, private))
Hugh Dickins91612e02005-06-21 17:15:07 -0700312 return -EIO;
313 } while (pgd++, addr = next, addr != end);
314 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315}
316
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800317/* Check if a vma is migratable */
318static inline int vma_migratable(struct vm_area_struct *vma)
319{
320 if (vma->vm_flags & (
Christoph Lameterf4598c82006-01-12 01:05:20 -0800321 VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800322 return 0;
323 return 1;
324}
325
326/*
327 * Check if all pages in a range are on a set of nodes.
328 * If pagelist != NULL then isolate pages from the LRU and
329 * put them on the pagelist.
330 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331static struct vm_area_struct *
332check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
Christoph Lameter38e35862006-01-08 01:01:01 -0800333 const nodemask_t *nodes, unsigned long flags, void *private)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334{
335 int err;
336 struct vm_area_struct *first, *vma, *prev;
337
Christoph Lameter90036ee2006-03-16 23:03:59 -0800338 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
Christoph Lameter90036ee2006-03-16 23:03:59 -0800339
Christoph Lameterb20a3502006-03-22 00:09:12 -0800340 err = migrate_prep();
341 if (err)
342 return ERR_PTR(err);
Christoph Lameter90036ee2006-03-16 23:03:59 -0800343 }
Nick Piggin053837f2006-01-18 17:42:27 -0800344
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 first = find_vma(mm, start);
346 if (!first)
347 return ERR_PTR(-EFAULT);
348 prev = NULL;
349 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800350 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
351 if (!vma->vm_next && vma->vm_end < end)
352 return ERR_PTR(-EFAULT);
353 if (prev && prev->vm_end < vma->vm_start)
354 return ERR_PTR(-EFAULT);
355 }
356 if (!is_vm_hugetlb_page(vma) &&
357 ((flags & MPOL_MF_STRICT) ||
358 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
359 vma_migratable(vma)))) {
Andi Kleen5b952b32005-09-13 01:25:08 -0700360 unsigned long endvma = vma->vm_end;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800361
Andi Kleen5b952b32005-09-13 01:25:08 -0700362 if (endvma > end)
363 endvma = end;
364 if (vma->vm_start > start)
365 start = vma->vm_start;
Christoph Lameterdc9aa5b2006-01-08 01:00:50 -0800366 err = check_pgd_range(vma, start, endvma, nodes,
Christoph Lameter38e35862006-01-08 01:01:01 -0800367 flags, private);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368 if (err) {
369 first = ERR_PTR(err);
370 break;
371 }
372 }
373 prev = vma;
374 }
375 return first;
376}
377
378/* Apply policy to a single VMA */
379static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
380{
381 int err = 0;
382 struct mempolicy *old = vma->vm_policy;
383
384 PDprintk("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
385 vma->vm_start, vma->vm_end, vma->vm_pgoff,
386 vma->vm_ops, vma->vm_file,
387 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
388
389 if (vma->vm_ops && vma->vm_ops->set_policy)
390 err = vma->vm_ops->set_policy(vma, new);
391 if (!err) {
392 mpol_get(new);
393 vma->vm_policy = new;
394 mpol_free(old);
395 }
396 return err;
397}
398
399/* Step 2: apply policy to a range and do splits. */
400static int mbind_range(struct vm_area_struct *vma, unsigned long start,
401 unsigned long end, struct mempolicy *new)
402{
403 struct vm_area_struct *next;
404 int err;
405
406 err = 0;
407 for (; vma && vma->vm_start < end; vma = next) {
408 next = vma->vm_next;
409 if (vma->vm_start < start)
410 err = split_vma(vma->vm_mm, vma, start, 1);
411 if (!err && vma->vm_end > end)
412 err = split_vma(vma->vm_mm, vma, end, 0);
413 if (!err)
414 err = policy_vma(vma, new);
415 if (err)
416 break;
417 }
418 return err;
419}
420
Christoph Lameter8bccd852005-10-29 18:16:59 -0700421static int contextualize_policy(int mode, nodemask_t *nodes)
422{
423 if (!nodes)
424 return 0;
425
Paul Jacksoncf2a4732006-01-08 01:01:54 -0800426 cpuset_update_task_memory_state();
Paul Jackson59665142006-01-08 01:01:47 -0800427 if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
428 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700429 return mpol_check_policy(mode, nodes);
430}
431
Paul Jacksonc61afb12006-03-24 03:16:08 -0800432
433/*
434 * Update task->flags PF_MEMPOLICY bit: set iff non-default
435 * mempolicy. Allows more rapid checking of this (combined perhaps
436 * with other PF_* flag bits) on memory allocation hot code paths.
437 *
438 * If called from outside this file, the task 'p' should -only- be
439 * a newly forked child not yet visible on the task list, because
440 * manipulating the task flags of a visible task is not safe.
441 *
442 * The above limitation is why this routine has the funny name
443 * mpol_fix_fork_child_flag().
444 *
445 * It is also safe to call this with a task pointer of current,
446 * which the static wrapper mpol_set_task_struct_flag() does,
447 * for use within this file.
448 */
449
450void mpol_fix_fork_child_flag(struct task_struct *p)
451{
452 if (p->mempolicy)
453 p->flags |= PF_MEMPOLICY;
454 else
455 p->flags &= ~PF_MEMPOLICY;
456}
457
458static void mpol_set_task_struct_flag(void)
459{
460 mpol_fix_fork_child_flag(current);
461}
462
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463/* Set the process memory policy */
Christoph Lameter8bccd852005-10-29 18:16:59 -0700464long do_set_mempolicy(int mode, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466 struct mempolicy *new;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700467
Christoph Lameter8bccd852005-10-29 18:16:59 -0700468 if (contextualize_policy(mode, nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700470 new = mpol_new(mode, nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 if (IS_ERR(new))
472 return PTR_ERR(new);
473 mpol_free(current->mempolicy);
474 current->mempolicy = new;
Paul Jacksonc61afb12006-03-24 03:16:08 -0800475 mpol_set_task_struct_flag();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476 if (new && new->policy == MPOL_INTERLEAVE)
Andi Kleendfcd3c02005-10-29 18:15:48 -0700477 current->il_next = first_node(new->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478 return 0;
479}
480
481/* Fill a zone bitmap for a policy */
Andi Kleendfcd3c02005-10-29 18:15:48 -0700482static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483{
484 int i;
485
Andi Kleendfcd3c02005-10-29 18:15:48 -0700486 nodes_clear(*nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487 switch (p->policy) {
488 case MPOL_BIND:
489 for (i = 0; p->v.zonelist->zones[i]; i++)
Christoph Lameter89fa3022006-09-25 23:31:55 -0700490 node_set(zone_to_nid(p->v.zonelist->zones[i]),
Christoph Lameter8bccd852005-10-29 18:16:59 -0700491 *nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 break;
493 case MPOL_DEFAULT:
494 break;
495 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -0700496 *nodes = p->v.nodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497 break;
498 case MPOL_PREFERRED:
499 /* or use current node instead of online map? */
500 if (p->v.preferred_node < 0)
Andi Kleendfcd3c02005-10-29 18:15:48 -0700501 *nodes = node_online_map;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700502 else
Andi Kleendfcd3c02005-10-29 18:15:48 -0700503 node_set(p->v.preferred_node, *nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700504 break;
505 default:
506 BUG();
507 }
508}
509
510static int lookup_node(struct mm_struct *mm, unsigned long addr)
511{
512 struct page *p;
513 int err;
514
515 err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
516 if (err >= 0) {
517 err = page_to_nid(p);
518 put_page(p);
519 }
520 return err;
521}
522
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523/* Retrieve NUMA policy */
Christoph Lameter8bccd852005-10-29 18:16:59 -0700524long do_get_mempolicy(int *policy, nodemask_t *nmask,
525 unsigned long addr, unsigned long flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526{
Christoph Lameter8bccd852005-10-29 18:16:59 -0700527 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528 struct mm_struct *mm = current->mm;
529 struct vm_area_struct *vma = NULL;
530 struct mempolicy *pol = current->mempolicy;
531
Paul Jacksoncf2a4732006-01-08 01:01:54 -0800532 cpuset_update_task_memory_state();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533 if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
534 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 if (flags & MPOL_F_ADDR) {
536 down_read(&mm->mmap_sem);
537 vma = find_vma_intersection(mm, addr, addr+1);
538 if (!vma) {
539 up_read(&mm->mmap_sem);
540 return -EFAULT;
541 }
542 if (vma->vm_ops && vma->vm_ops->get_policy)
543 pol = vma->vm_ops->get_policy(vma, addr);
544 else
545 pol = vma->vm_policy;
546 } else if (addr)
547 return -EINVAL;
548
549 if (!pol)
550 pol = &default_policy;
551
552 if (flags & MPOL_F_NODE) {
553 if (flags & MPOL_F_ADDR) {
554 err = lookup_node(mm, addr);
555 if (err < 0)
556 goto out;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700557 *policy = err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 } else if (pol == current->mempolicy &&
559 pol->policy == MPOL_INTERLEAVE) {
Christoph Lameter8bccd852005-10-29 18:16:59 -0700560 *policy = current->il_next;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 } else {
562 err = -EINVAL;
563 goto out;
564 }
565 } else
Christoph Lameter8bccd852005-10-29 18:16:59 -0700566 *policy = pol->policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567
568 if (vma) {
569 up_read(&current->mm->mmap_sem);
570 vma = NULL;
571 }
572
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 err = 0;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700574 if (nmask)
575 get_zonemask(pol, nmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576
577 out:
578 if (vma)
579 up_read(&current->mm->mmap_sem);
580 return err;
581}
582
Christoph Lameterb20a3502006-03-22 00:09:12 -0800583#ifdef CONFIG_MIGRATION
Christoph Lameter8bccd852005-10-29 18:16:59 -0700584/*
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800585 * page migration
586 */
Christoph Lameterfc301282006-01-18 17:42:29 -0800587static void migrate_page_add(struct page *page, struct list_head *pagelist,
588 unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800589{
590 /*
Christoph Lameterfc301282006-01-18 17:42:29 -0800591 * Avoid migrating a page that is shared with others.
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800592 */
Christoph Lameterb20a3502006-03-22 00:09:12 -0800593 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
594 isolate_lru_page(page, pagelist);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800595}
596
Christoph Lameter742755a2006-06-23 02:03:55 -0700597static struct page *new_node_page(struct page *page, unsigned long node, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -0700598{
599 return alloc_pages_node(node, GFP_HIGHUSER, 0);
600}
601
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800602/*
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800603 * Migrate pages from one node to a target node.
604 * Returns error or the number of pages not migrated.
605 */
606int migrate_to_node(struct mm_struct *mm, int source, int dest, int flags)
607{
608 nodemask_t nmask;
609 LIST_HEAD(pagelist);
610 int err = 0;
611
612 nodes_clear(nmask);
613 node_set(source, nmask);
614
615 check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
616 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
617
Christoph Lameteraaa994b2006-06-23 02:03:52 -0700618 if (!list_empty(&pagelist))
Christoph Lameter95a402c2006-06-23 02:03:53 -0700619 err = migrate_pages(&pagelist, new_node_page, dest);
620
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800621 return err;
622}
623
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
630int do_migrate_pages(struct mm_struct *mm,
631 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
632{
633 LIST_HEAD(pagelist);
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800634 int busy = 0;
635 int err = 0;
636 nodemask_t tmp;
Christoph Lameter39743882006-01-08 01:00:51 -0800637
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800638 down_read(&mm->mmap_sem);
Christoph Lameter39743882006-01-08 01:00:51 -0800639
Christoph Lameter7b2259b2006-06-25 05:46:48 -0700640 err = migrate_vmas(mm, from_nodes, to_nodes, flags);
641 if (err)
642 goto out;
643
/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fall back to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory off that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
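/*
 * Worked example (illustrative numbers, not from the source): migrating
 * from_nodes = {0,1} to to_nodes = {2,3}.  node_remap() gives 0 -> 2 and
 * 1 -> 3; neither destination is still a pending source in tmp, so the
 * first scan breaks out with <0,2>, clears node 0 from tmp, and the next
 * pass picks <1,3>.  If the sets overlapped, say to_nodes = {1,2}, the
 * scan would keep going past <0,1> (node 1 must still be emptied) and
 * prefer <1,2>, whose destination is no longer a pending source.
 */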
Christoph Lameterd4984712006-01-08 01:00:55 -0800674
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800675 tmp = *from_nodes;
676 while (!nodes_empty(tmp)) {
677 int s,d;
678 int source = -1;
679 int dest = 0;
680
681 for_each_node_mask(s, tmp) {
682 d = node_remap(s, *from_nodes, *to_nodes);
683 if (s == d)
684 continue;
685
686 source = s; /* Node moved. Memorize */
687 dest = d;
688
689 /* dest not in remaining from nodes? */
690 if (!node_isset(dest, tmp))
691 break;
692 }
693 if (source == -1)
694 break;
695
696 node_clear(source, tmp);
697 err = migrate_to_node(mm, source, dest, flags);
698 if (err > 0)
699 busy += err;
700 if (err < 0)
701 break;
Christoph Lameter39743882006-01-08 01:00:51 -0800702 }
Christoph Lameter7b2259b2006-06-25 05:46:48 -0700703out:
Christoph Lameter39743882006-01-08 01:00:51 -0800704 up_read(&mm->mmap_sem);
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800705 if (err < 0)
706 return err;
707 return busy;
Christoph Lameterb20a3502006-03-22 00:09:12 -0800708
Christoph Lameter39743882006-01-08 01:00:51 -0800709}
710
Christoph Lameter742755a2006-06-23 02:03:55 -0700711static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -0700712{
713 struct vm_area_struct *vma = (struct vm_area_struct *)private;
714
715 return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma));
716}
Christoph Lameterb20a3502006-03-22 00:09:12 -0800717#else
718
719static void migrate_page_add(struct page *page, struct list_head *pagelist,
720 unsigned long flags)
721{
722}
723
724int do_migrate_pages(struct mm_struct *mm,
725 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
726{
727 return -ENOSYS;
728}
Christoph Lameter95a402c2006-06-23 02:03:53 -0700729
static struct page *new_vma_page(struct page *page, unsigned long private,
		int **x)
{
	return NULL;
}
Christoph Lameterb20a3502006-03-22 00:09:12 -0800734#endif
735
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800736long do_mbind(unsigned long start, unsigned long len,
737 unsigned long mode, nodemask_t *nmask, unsigned long flags)
738{
739 struct vm_area_struct *vma;
740 struct mm_struct *mm = current->mm;
741 struct mempolicy *new;
742 unsigned long end;
743 int err;
744 LIST_HEAD(pagelist);
745
746 if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
747 MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
748 || mode > MPOL_MAX)
749 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -0800750 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800751 return -EPERM;
752
753 if (start & ~PAGE_MASK)
754 return -EINVAL;
755
756 if (mode == MPOL_DEFAULT)
757 flags &= ~MPOL_MF_STRICT;
758
759 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
760 end = start + len;
761
762 if (end < start)
763 return -EINVAL;
764 if (end == start)
765 return 0;
766
767 if (mpol_check_policy(mode, nmask))
768 return -EINVAL;
769
770 new = mpol_new(mode, nmask);
771 if (IS_ERR(new))
772 return PTR_ERR(new);
773
774 /*
775 * If we are using the default policy then operation
776 * on discontinuous address spaces is okay after all
777 */
778 if (!new)
779 flags |= MPOL_MF_DISCONTIG_OK;
780
781 PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
782 mode,nodes_addr(nodes)[0]);
783
784 down_write(&mm->mmap_sem);
785 vma = check_range(mm, start, end, nmask,
786 flags | MPOL_MF_INVERT, &pagelist);
787
788 err = PTR_ERR(vma);
789 if (!IS_ERR(vma)) {
790 int nr_failed = 0;
791
792 err = mbind_range(vma, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -0800793
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800794 if (!list_empty(&pagelist))
Christoph Lameter95a402c2006-06-23 02:03:53 -0700795 nr_failed = migrate_pages(&pagelist, new_vma_page,
796 (unsigned long)vma);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800797
798 if (!err && nr_failed && (flags & MPOL_MF_STRICT))
799 err = -EIO;
800 }
Christoph Lameterb20a3502006-03-22 00:09:12 -0800801
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -0800802 up_write(&mm->mmap_sem);
803 mpol_free(new);
804 return err;
805}
806
Christoph Lameter39743882006-01-08 01:00:51 -0800807/*
Christoph Lameter8bccd852005-10-29 18:16:59 -0700808 * User space interface with variable sized bitmaps for nodelists.
809 */
810
811/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -0800812static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -0700813 unsigned long maxnode)
814{
815 unsigned long k;
816 unsigned long nlongs;
817 unsigned long endmask;
818
819 --maxnode;
820 nodes_clear(*nodes);
821 if (maxnode == 0 || !nmask)
822 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -0800823 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -0800824 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -0700825
826 nlongs = BITS_TO_LONGS(maxnode);
827 if ((maxnode % BITS_PER_LONG) == 0)
828 endmask = ~0UL;
829 else
830 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
831
	/* When the user specified more nodes than supported just check
	   if the unsupported part is all zero. */
834 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
835 if (nlongs > PAGE_SIZE/sizeof(long))
836 return -EINVAL;
837 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
838 unsigned long t;
839 if (get_user(t, nmask + k))
840 return -EFAULT;
841 if (k == nlongs - 1) {
842 if (t & endmask)
843 return -EINVAL;
844 } else if (t)
845 return -EINVAL;
846 }
847 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
848 endmask = ~0UL;
849 }
850
851 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
852 return -EFAULT;
853 nodes_addr(*nodes)[nlongs-1] &= endmask;
854 return 0;
855}
856
857/* Copy a kernel node mask to user space */
858static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
859 nodemask_t *nodes)
860{
861 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
862 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
863
864 if (copy > nbytes) {
865 if (copy > PAGE_SIZE)
866 return -EINVAL;
867 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
868 return -EFAULT;
869 copy = nbytes;
870 }
871 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
872}
873
874asmlinkage long sys_mbind(unsigned long start, unsigned long len,
875 unsigned long mode,
876 unsigned long __user *nmask, unsigned long maxnode,
877 unsigned flags)
878{
879 nodemask_t nodes;
880 int err;
881
882 err = get_nodes(&nodes, nmask, maxnode);
883 if (err)
884 return err;
885 return do_mbind(start, len, mode, &nodes, flags);
886}
887
888/* Set the process memory policy */
889asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
890 unsigned long maxnode)
891{
892 int err;
893 nodemask_t nodes;
894
895 if (mode < 0 || mode > MPOL_MAX)
896 return -EINVAL;
897 err = get_nodes(&nodes, nmask, maxnode);
898 if (err)
899 return err;
900 return do_set_mempolicy(mode, &nodes);
901}
902
Christoph Lameter39743882006-01-08 01:00:51 -0800903asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
904 const unsigned long __user *old_nodes,
905 const unsigned long __user *new_nodes)
906{
907 struct mm_struct *mm;
908 struct task_struct *task;
909 nodemask_t old;
910 nodemask_t new;
911 nodemask_t task_nodes;
912 int err;
913
914 err = get_nodes(&old, old_nodes, maxnode);
915 if (err)
916 return err;
917
918 err = get_nodes(&new, new_nodes, maxnode);
919 if (err)
920 return err;
921
922 /* Find the mm_struct */
923 read_lock(&tasklist_lock);
924 task = pid ? find_task_by_pid(pid) : current;
925 if (!task) {
926 read_unlock(&tasklist_lock);
927 return -ESRCH;
928 }
929 mm = get_task_mm(task);
930 read_unlock(&tasklist_lock);
931
932 if (!mm)
933 return -EINVAL;
934
935 /*
936 * Check if this process has the right to modify the specified
937 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -0800938 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -0800939 * userid as the target process.
940 */
941 if ((current->euid != task->suid) && (current->euid != task->uid) &&
942 (current->uid != task->suid) && (current->uid != task->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -0800943 !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -0800944 err = -EPERM;
945 goto out;
946 }
947
948 task_nodes = cpuset_mems_allowed(task);
949 /* Is the user allowed to access the target nodes? */
Christoph Lameter74c00242006-03-14 19:50:21 -0800950 if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -0800951 err = -EPERM;
952 goto out;
953 }
954
David Quigley86c3a762006-06-23 02:04:02 -0700955 err = security_task_movememory(task);
956 if (err)
957 goto out;
958
Christoph Lameter511030b2006-02-28 16:58:57 -0800959 err = do_migrate_pages(mm, &old, &new,
Christoph Lameter74c00242006-03-14 19:50:21 -0800960 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter39743882006-01-08 01:00:51 -0800961out:
962 mmput(mm);
963 return err;
964}
965
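/*
 * Illustrative user-space sketch (assumes libnuma's <numaif.h> wrapper for
 * this syscall; the helper name is made up): move all of a task's pages
 * from node 0 to node 1.
 *
 *	#include <numaif.h>
 *
 *	static long move_task_memory(pid_t pid)
 *	{
 *		unsigned long old_nodes = 1UL << 0;	// source: node 0
 *		unsigned long new_nodes = 1UL << 1;	// destination: node 1
 *
 *		// Returns the number of pages that could not be moved,
 *		// or a negative errno-style value on failure.
 *		return migrate_pages(pid, 8 * sizeof(old_nodes),
 *				     &old_nodes, &new_nodes);
 *	}
 */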
966
Christoph Lameter8bccd852005-10-29 18:16:59 -0700967/* Retrieve NUMA policy */
968asmlinkage long sys_get_mempolicy(int __user *policy,
969 unsigned long __user *nmask,
970 unsigned long maxnode,
971 unsigned long addr, unsigned long flags)
972{
973 int err, pval;
974 nodemask_t nodes;
975
976 if (nmask != NULL && maxnode < MAX_NUMNODES)
977 return -EINVAL;
978
979 err = do_get_mempolicy(&pval, &nodes, addr, flags);
980
981 if (err)
982 return err;
983
984 if (policy && put_user(pval, policy))
985 return -EFAULT;
986
987 if (nmask)
988 err = copy_nodes_to_user(nmask, maxnode, &nodes);
989
990 return err;
991}
992
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993#ifdef CONFIG_COMPAT
994
995asmlinkage long compat_sys_get_mempolicy(int __user *policy,
996 compat_ulong_t __user *nmask,
997 compat_ulong_t maxnode,
998 compat_ulong_t addr, compat_ulong_t flags)
999{
1000 long err;
1001 unsigned long __user *nm = NULL;
1002 unsigned long nr_bits, alloc_size;
1003 DECLARE_BITMAP(bm, MAX_NUMNODES);
1004
1005 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1006 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1007
1008 if (nmask)
1009 nm = compat_alloc_user_space(alloc_size);
1010
1011 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1012
1013 if (!err && nmask) {
1014 err = copy_from_user(bm, nm, alloc_size);
1015 /* ensure entire bitmap is zeroed */
1016 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1017 err |= compat_put_bitmap(nmask, bm, nr_bits);
1018 }
1019
1020 return err;
1021}
1022
1023asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1024 compat_ulong_t maxnode)
1025{
1026 long err = 0;
1027 unsigned long __user *nm = NULL;
1028 unsigned long nr_bits, alloc_size;
1029 DECLARE_BITMAP(bm, MAX_NUMNODES);
1030
1031 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1032 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1033
1034 if (nmask) {
1035 err = compat_get_bitmap(bm, nmask, nr_bits);
1036 nm = compat_alloc_user_space(alloc_size);
1037 err |= copy_to_user(nm, bm, alloc_size);
1038 }
1039
1040 if (err)
1041 return -EFAULT;
1042
1043 return sys_set_mempolicy(mode, nm, nr_bits+1);
1044}
1045
1046asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1047 compat_ulong_t mode, compat_ulong_t __user *nmask,
1048 compat_ulong_t maxnode, compat_ulong_t flags)
1049{
1050 long err = 0;
1051 unsigned long __user *nm = NULL;
1052 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001053 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054
1055 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1056 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1057
1058 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001059 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001060 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001061 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 }
1063
1064 if (err)
1065 return -EFAULT;
1066
1067 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1068}
1069
1070#endif
1071
1072/* Return effective policy for a VMA */
Christoph Lameter48fce342006-01-08 01:01:03 -08001073static struct mempolicy * get_vma_policy(struct task_struct *task,
1074 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001076 struct mempolicy *pol = task->mempolicy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077
1078 if (vma) {
1079 if (vma->vm_ops && vma->vm_ops->get_policy)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001080 pol = vma->vm_ops->get_policy(vma, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 else if (vma->vm_policy &&
1082 vma->vm_policy->policy != MPOL_DEFAULT)
1083 pol = vma->vm_policy;
1084 }
1085 if (!pol)
1086 pol = &default_policy;
1087 return pol;
1088}
1089
1090/* Return a zonelist representing a mempolicy */
Al Virodd0fc662005-10-07 07:46:04 +01001091static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092{
1093 int nd;
1094
1095 switch (policy->policy) {
1096 case MPOL_PREFERRED:
1097 nd = policy->v.preferred_node;
1098 if (nd < 0)
1099 nd = numa_node_id();
1100 break;
1101 case MPOL_BIND:
1102 /* Lower zones don't get a policy applied */
1103 /* Careful: current->mems_allowed might have moved */
Christoph Lameter19655d32006-09-25 23:31:19 -07001104 if (gfp_zone(gfp) >= policy_zone)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105 if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
1106 return policy->v.zonelist;
1107 /*FALL THROUGH*/
1108 case MPOL_INTERLEAVE: /* should not happen */
1109 case MPOL_DEFAULT:
1110 nd = numa_node_id();
1111 break;
1112 default:
1113 nd = 0;
1114 BUG();
1115 }
Al Viroaf4ca452005-10-21 02:55:38 -04001116 return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117}
1118
1119/* Do dynamic interleaving for a process */
1120static unsigned interleave_nodes(struct mempolicy *policy)
1121{
1122 unsigned nid, next;
1123 struct task_struct *me = current;
1124
1125 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001126 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001128 next = first_node(policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 me->il_next = next;
1130 return nid;
1131}
1132
Christoph Lameterdc85da12006-01-18 17:42:36 -08001133/*
1134 * Depending on the memory policy provide a node from which to allocate the
1135 * next slab entry.
1136 */
1137unsigned slab_node(struct mempolicy *policy)
1138{
Christoph Lameter765c4502006-09-27 01:50:08 -07001139 int pol = policy ? policy->policy : MPOL_DEFAULT;
1140
1141 switch (pol) {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001142 case MPOL_INTERLEAVE:
1143 return interleave_nodes(policy);
1144
1145 case MPOL_BIND:
1146 /*
1147 * Follow bind policy behavior and start allocation at the
1148 * first node.
1149 */
Christoph Lameter89fa3022006-09-25 23:31:55 -07001150 return zone_to_nid(policy->v.zonelist->zones[0]);
Christoph Lameterdc85da12006-01-18 17:42:36 -08001151
1152 case MPOL_PREFERRED:
1153 if (policy->v.preferred_node >= 0)
1154 return policy->v.preferred_node;
1155 /* Fall through */
1156
1157 default:
1158 return numa_node_id();
1159 }
1160}
1161
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162/* Do static interleaving for a VMA with known offset. */
1163static unsigned offset_il_node(struct mempolicy *pol,
1164 struct vm_area_struct *vma, unsigned long off)
1165{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001166 unsigned nnodes = nodes_weight(pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167 unsigned target = (unsigned)off % nnodes;
1168 int c;
1169 int nid = -1;
1170
1171 c = 0;
1172 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001173 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 c++;
1175 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 return nid;
1177}
1178
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001179/* Determine a node number for interleave */
1180static inline unsigned interleave_nid(struct mempolicy *pol,
1181 struct vm_area_struct *vma, unsigned long addr, int shift)
1182{
1183 if (vma) {
1184 unsigned long off;
1185
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001186 /*
1187 * for small pages, there is no difference between
1188 * shift and PAGE_SHIFT, so the bit-shift is safe.
1189 * for huge pages, since vm_pgoff is in units of small
1190 * pages, we need to shift off the always 0 bits to get
1191 * a useful offset.
1192 */
1193 BUG_ON(shift < PAGE_SHIFT);
1194 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001195 off += (addr - vma->vm_start) >> shift;
1196 return offset_il_node(pol, vma, off);
1197 } else
1198 return interleave_nodes(pol);
1199}
1200
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001201#ifdef CONFIG_HUGETLBFS
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001202/* Return a zonelist suitable for a huge page allocation. */
1203struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1204{
1205 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1206
1207 if (pol->policy == MPOL_INTERLEAVE) {
1208 unsigned nid;
1209
1210 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1211 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER);
1212 }
1213 return zonelist_policy(GFP_HIGHUSER, pol);
1214}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001215#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001216
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217/* Allocate a page in interleaved policy.
1218 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001219static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1220 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221{
1222 struct zonelist *zl;
1223 struct page *page;
1224
Al Viroaf4ca452005-10-21 02:55:38 -04001225 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 page = __alloc_pages(gfp, order, zl);
Christoph Lameterca889e62006-06-30 01:55:44 -07001227 if (page && page_zone(page) == zl->zones[0])
1228 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 return page;
1230}
1231
/**
 * alloc_page_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER     user allocation.
 *	%GFP_KERNEL   kernel allocations,
 *	%GFP_HIGHMEM  highmem/user allocations,
 *	%GFP_FS       allocation should not call back into a file system.
 *	%GFP_ATOMIC   don't sleep.
 *
 * @vma:  Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation. Must be inside the VMA.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL the caller must hold down_read on the mmap_sem of
 * the mm_struct of the VMA to prevent it from going away. Should be used
 * for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
1254struct page *
Al Virodd0fc662005-10-07 07:46:04 +01001255alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256{
Christoph Lameter6e21c8f2005-09-03 15:54:45 -07001257 struct mempolicy *pol = get_vma_policy(current, vma, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001259 cpuset_update_task_memory_state();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
1261 if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
1262 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001263
1264 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 return alloc_page_interleave(gfp, 0, nid);
1266 }
1267 return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
1268}
1269
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER     user allocation,
 *	%GFP_KERNEL   kernel allocation,
 *	%GFP_HIGHMEM  highmem allocation,
 *	%GFP_FS       don't call back into a file system.
 *	%GFP_ATOMIC   don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool. When not in interrupt
 * context, apply the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
Al Virodd0fc662005-10-07 07:46:04 +01001289struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290{
1291 struct mempolicy *pol = current->mempolicy;
1292
1293 if ((gfp & __GFP_WAIT) && !in_interrupt())
Paul Jacksoncf2a4732006-01-08 01:01:54 -08001294 cpuset_update_task_memory_state();
Christoph Lameter9b819d22006-09-25 23:31:40 -07001295 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 pol = &default_policy;
1297 if (pol->policy == MPOL_INTERLEAVE)
1298 return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1299 return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
1300}
1301EXPORT_SYMBOL(alloc_pages_current);
1302
/*
 * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 */
1310void *cpuset_being_rebound;
1311
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312/* Slow path of a mempolicy copy */
1313struct mempolicy *__mpol_copy(struct mempolicy *old)
1314{
1315 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1316
1317 if (!new)
1318 return ERR_PTR(-ENOMEM);
Paul Jackson42253992006-01-08 01:01:59 -08001319 if (current_cpuset_is_being_rebound()) {
1320 nodemask_t mems = cpuset_mems_allowed(current);
1321 mpol_rebind_policy(old, &mems);
1322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 *new = *old;
1324 atomic_set(&new->refcnt, 1);
1325 if (new->policy == MPOL_BIND) {
1326 int sz = ksize(old->v.zonelist);
Alexey Dobriyan52978be2006-09-30 23:27:21 -07001327 new->v.zonelist = kmemdup(old->v.zonelist, sz, SLAB_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 if (!new->v.zonelist) {
1329 kmem_cache_free(policy_cache, new);
1330 return ERR_PTR(-ENOMEM);
1331 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 }
1333 return new;
1334}
1335
1336/* Slow path of a mempolicy comparison */
1337int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1338{
1339 if (!a || !b)
1340 return 0;
1341 if (a->policy != b->policy)
1342 return 0;
1343 switch (a->policy) {
1344 case MPOL_DEFAULT:
1345 return 1;
1346 case MPOL_INTERLEAVE:
Andi Kleendfcd3c02005-10-29 18:15:48 -07001347 return nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 case MPOL_PREFERRED:
1349 return a->v.preferred_node == b->v.preferred_node;
1350 case MPOL_BIND: {
1351 int i;
1352 for (i = 0; a->v.zonelist->zones[i]; i++)
1353 if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
1354 return 0;
1355 return b->v.zonelist->zones[i] == NULL;
1356 }
1357 default:
1358 BUG();
1359 return 0;
1360 }
1361}
1362
1363/* Slow path of a mpol destructor. */
1364void __mpol_free(struct mempolicy *p)
1365{
1366 if (!atomic_dec_and_test(&p->refcnt))
1367 return;
1368 if (p->policy == MPOL_BIND)
1369 kfree(p->v.zonelist);
1370 p->policy = MPOL_DEFAULT;
1371 kmem_cache_free(policy_cache, p);
1372}
1373
1374/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 * Shared memory backing store policy support.
1376 *
1377 * Remember policies even when nobody has shared memory mapped.
1378 * The policies are kept in Red-Black tree linked from the inode.
1379 * They are protected by the sp->lock spinlock, which should be held
1380 * for any accesses to the tree.
1381 */
1382
1383/* lookup first element intersecting start-end */
1384/* Caller holds sp->lock */
1385static struct sp_node *
1386sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1387{
1388 struct rb_node *n = sp->root.rb_node;
1389
1390 while (n) {
1391 struct sp_node *p = rb_entry(n, struct sp_node, nd);
1392
1393 if (start >= p->end)
1394 n = n->rb_right;
1395 else if (end <= p->start)
1396 n = n->rb_left;
1397 else
1398 break;
1399 }
1400 if (!n)
1401 return NULL;
1402 for (;;) {
1403 struct sp_node *w = NULL;
1404 struct rb_node *prev = rb_prev(n);
1405 if (!prev)
1406 break;
1407 w = rb_entry(prev, struct sp_node, nd);
1408 if (w->end <= start)
1409 break;
1410 n = prev;
1411 }
1412 return rb_entry(n, struct sp_node, nd);
1413}
1414
1415/* Insert a new shared policy into the list. */
1416/* Caller holds sp->lock */
1417static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1418{
1419 struct rb_node **p = &sp->root.rb_node;
1420 struct rb_node *parent = NULL;
1421 struct sp_node *nd;
1422
1423 while (*p) {
1424 parent = *p;
1425 nd = rb_entry(parent, struct sp_node, nd);
1426 if (new->start < nd->start)
1427 p = &(*p)->rb_left;
1428 else if (new->end > nd->end)
1429 p = &(*p)->rb_right;
1430 else
1431 BUG();
1432 }
1433 rb_link_node(&new->nd, parent, p);
1434 rb_insert_color(&new->nd, &sp->root);
1435 PDprintk("inserting %lx-%lx: %d\n", new->start, new->end,
1436 new->policy ? new->policy->policy : 0);
1437}
1438
1439/* Find shared policy intersecting idx */
1440struct mempolicy *
1441mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1442{
1443 struct mempolicy *pol = NULL;
1444 struct sp_node *sn;
1445
1446 if (!sp->root.rb_node)
1447 return NULL;
1448 spin_lock(&sp->lock);
1449 sn = sp_lookup(sp, idx, idx+1);
1450 if (sn) {
1451 mpol_get(sn->policy);
1452 pol = sn->policy;
1453 }
1454 spin_unlock(&sp->lock);
1455 return pol;
1456}
1457
1458static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1459{
1460 PDprintk("deleting %lx-l%x\n", n->start, n->end);
1461 rb_erase(&n->nd, &sp->root);
1462 mpol_free(n->policy);
1463 kmem_cache_free(sn_cache, n);
1464}

struct sp_node *
sp_alloc(unsigned long start, unsigned long end, struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_free(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}
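
/*
 * Worked example (illustrative, not from the original source): replacing
 * [4,6) when the tree holds a single node [2,9).  The old node spans the
 * whole new range, so a second node new2 = [6,9) is allocated; because the
 * allocation may sleep, sp->lock is dropped first and the scan restarts.
 * On the second pass the old node is trimmed to [2,4), new2 is inserted,
 * and finally the new [4,6) policy is inserted.  If the allocation of new2
 * fails, -ENOMEM is returned.
 */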

void mpol_shared_policy_init(struct shared_policy *info, int policy,
			     nodemask_t *policy_nodes)
{
	info->root = RB_ROOT;
	spin_lock_init(&info->lock);

	if (policy != MPOL_DEFAULT) {
		struct mempolicy *newpol;

		/* Falls back to MPOL_DEFAULT on any error */
		newpol = mpol_new(policy, policy_nodes);
		if (!IS_ERR(newpol)) {
			/* Create pseudo-vma that contains just the policy */
			struct vm_area_struct pvma;

			memset(&pvma, 0, sizeof(struct vm_area_struct));
			/* Policy covers entire file */
			pvma.vm_end = TASK_SIZE;
			mpol_set_shared_policy(info, &pvma, newpol);
			mpol_free(newpol);
		}
	}
}
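
/*
 * Usage sketch (assumption, not part of this file): a filesystem that
 * supports shared policies, e.g. tmpfs, would call this at inode setup time
 * with the mount's default policy.  The pseudo-vma above reuses
 * mpol_set_shared_policy() so that the initial policy covers the whole file:
 *
 *	mpol_shared_policy_init(&info->policy, sbinfo->policy,
 *				&sbinfo->policy_nodes);
 *
 * where 'info' and 'sbinfo' stand for the caller's per-inode and per-mount
 * data; the field names are illustrative only.
 */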

int mpol_set_shared_policy(struct shared_policy *info,
			   struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->policy : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}
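
/*
 * Note (added for clarity): the shared-policy tree is indexed in units of
 * pages into the backing object, not virtual addresses, so the range stored
 * here runs from vma->vm_pgoff to vma->vm_pgoff + vma_pages(vma).  For
 * example, a policy applied to the second and third pages of the object is
 * stored as the range [1,3) regardless of where the object is mapped.
 */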

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_free(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL, NULL);

	/* Set interleaving policy for system init. This way not all
	   the data structures allocated at system boot end up in node zero. */

	if (do_set_mempolicy(MPOL_INTERLEAVE, &node_online_map))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, NULL);
}

/* Migrate a policy to a different set of nodes */
void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	nodemask_t *mpolmask;
	nodemask_t tmp;

	if (!pol)
		return;
	mpolmask = &pol->cpuset_mems_allowed;
	if (nodes_equal(*mpolmask, *newmask))
		return;

	switch (pol->policy) {
	case MPOL_DEFAULT:
		break;
	case MPOL_INTERLEAVE:
		nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
		pol->v.nodes = tmp;
		*mpolmask = *newmask;
		current->il_next = node_remap(current->il_next,
						*mpolmask, *newmask);
		break;
	case MPOL_PREFERRED:
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   *mpolmask, *newmask);
		*mpolmask = *newmask;
		break;
	case MPOL_BIND: {
		nodemask_t nodes;
		struct zone **z;
		struct zonelist *zonelist;

		nodes_clear(nodes);
		for (z = pol->v.zonelist->zones; *z; z++)
			node_set(zone_to_nid(*z), nodes);
		nodes_remap(tmp, nodes, *mpolmask, *newmask);
		nodes = tmp;

		zonelist = bind_zonelist(&nodes);

		/* If no mem, then zonelist is NULL and we keep old zonelist.
		 * If that old zonelist has no remaining mems_allowed nodes,
		 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
		 */

		if (zonelist) {
			/* Good - got mem - substitute new zonelist */
			kfree(pol->v.zonelist);
			pol->v.zonelist = zonelist;
		}
		*mpolmask = *newmask;
		break;
	}
	default:
		BUG();
		break;
	}
}
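
/*
 * Worked example (illustrative, not from the original source): a task with
 * an MPOL_INTERLEAVE policy over nodes {0,1} whose allowed nodes change to
 * {2,3}.  nodes_remap() maps node 0 -> 2 and node 1 -> 3, so the policy
 * becomes interleave over {2,3}, and pol->cpuset_mems_allowed is updated to
 * the new mask so that a repeated rebind with the same mask is caught by the
 * nodes_equal() check above and becomes a no-op.
 */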

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}
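
/*
 * Note (assumption about callers, not stated in this file): these rebind
 * helpers are intended for the cpuset code, which is expected to call
 * mpol_rebind_task() and mpol_rebind_mm() when a task's set of allowed
 * memory nodes changes, so that existing task and VMA policies keep
 * referring to nodes the task may actually use.
 */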

/*
 * Display pages allocated per node and memory policy via /proc.
 */

static const char *policy_types[] = { "default", "prefer", "bind",
				      "interleave" };

/*
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	int mode = pol ? pol->policy : MPOL_DEFAULT;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		get_zonemask(pol, &nodes);
		break;

	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		BUG();
		return -EFAULT;
	}

	l = strlen(policy_types[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_types[mode]);
	p += l;

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
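
/*
 * Example output (illustrative): "default", "prefer=1", "bind=1,3" or
 * "interleave=0-3".  The node list after '=' is produced by
 * nodelist_scnprintf(), which prints the mask in the usual nodemask list
 * format with commas and ranges.
 */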

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, void *private, int pte_dirty)
{
	struct numa_maps *md = private;
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

#ifdef CONFIG_HUGETLB_PAGE
static void check_huge_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end,
			     struct numa_maps *md)
{
	unsigned long addr;
	struct page *page;

	for (addr = start; addr < end; addr += HPAGE_SIZE) {
		pte_t *ptep = huge_pte_offset(vma->vm_mm, addr & HPAGE_MASK);
		pte_t pte;

		if (!ptep)
			continue;

		pte = *ptep;
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		gather_stats(page, md, pte_dirty(*ptep));
	}
}
#else
static inline void check_huge_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end,
				    struct numa_maps *md)
{
}
#endif

int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	mpol_to_str(buffer, sizeof(buffer),
		    get_vma_policy(priv->task, vma, vma->vm_start));

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
		   vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	if (is_vm_hugetlb_page(vma)) {
		check_huge_range(vma, vma->vm_start, vma->vm_end, md);
		seq_printf(m, " huge");
	} else {
		check_pgd_range(vma, vma->vm_start, vma->vm_end,
				&node_online_map, MPOL_MF_STATS, md);
	}

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_online_node(n)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
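
/*
 * Example /proc/<pid>/numa_maps line (illustrative; the exact fields depend
 * on the VMA, and the path shown is hypothetical):
 *
 *	2000000000000000 interleave=0-3 file=/lib/ld-2.3.6.so mapped=12 N0=3 N1=3 N2=3 N3=3
 *
 * i.e. the VMA start address, the policy string from mpol_to_str(), an
 * optional file/heap/stack/huge tag, the per-VMA counters, and finally the
 * per-node page counts.
 */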