/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
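
/*
 * Illustrative sketch (not part of the kernel build): how user space
 * typically exercises the modes above, via the <numaif.h> syscall
 * wrappers shipped with libnuma.  The nodemask layout matches
 * get_nodes() below: one bit per node, with maxnode counting bits.
 * 'addr' and 'length' are assumed to describe an existing mapping.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Interleave all future allocations of this process over 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// Bind one mapping to node 0 only, moving misplaced pages.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */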

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/random.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step removes all the newly disallowed nodes.  This way a reader
	 * never sees an empty nodemask and cannot fail to find a node to
	 * allocate a page from.
	 * If we hold a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - remove all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	int nd, k;

	for_each_node_mask(nd, *nodemask) {
		struct zone *z;

		for (k = 0; k <= policy_zone; k++) {
			z = &NODE_DATA(nd)->node_zones[k];
			if (z->present_pages > 0)
				return 1;
		}
	}

	return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
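
/*
 * Worked example for the remap above (illustrative values): with
 * orig = {1,3} and rel = {4,5}, nodes_fold() folds orig onto
 * nodes_weight(rel) == 2 bits, giving tmp = {1}; nodes_onto() then
 * maps bit n of tmp to the n-th set bit of rel, so *ret = {5}.
 * Relative nodemasks thus select positions within the allowed set
 * rather than absolute node numbers.
 */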

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_HIGH_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, performs some basic checks and
 * simple initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;	/* simply delete any existing policy */
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - remove all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps.  The first
 * step sets all the newly allowed nodes, and the second step removes
 * all the newly disallowed nodes.  This way a reader never sees an
 * empty nodemask and cannot fail to find a node to allocate a page from.
 * If we hold a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - remove all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == 0 &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}
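
/*
 * Worked example of the two-step rebind (illustrative values): a task
 * holding an MPOL_INTERLEAVE policy over {0,1} has its cpuset mems
 * changed from {0,1} to {2,3}.  MPOL_REBIND_STEP1 remaps {0,1} to
 * {2,3} and ORs the result in, leaving pol->v.nodes == {0,1,2,3}, so
 * a racing reader always finds a usable node.  MPOL_REBIND_STEP2 then
 * installs the cached remap, shrinking pol->v.nodes to {2,3}.
 */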

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 * And we cannot move PageKsm pages sensibly or safely yet.
		 */
		if (PageReserved(page) || PageKsm(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(vma->vm_mm, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}
		if (!is_vm_hugetlb_page(vma) &&
		    ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
				vma_migratable(vma)))) {
			unsigned long endvma = vma->vm_end;

			if (endvma > end)
				endvma = end;
			if (vma->vm_start > start)
				start = vma->vm_start;
			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
		prev = vma;
	}
	return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
	int err = 0;
	struct mempolicy *old = vma->vm_policy;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	if (vma->vm_ops && vma->vm_ops->set_policy)
		err = vma->vm_ops->set_policy(vma, new);
	if (!err) {
		mpol_get(new);
		vma->vm_policy = new;
		mpol_put(old);
	}
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol, vma_get_anon_name(vma));
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			continue;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
		err = policy_vma(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
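
/*
 * Worked example (illustrative addresses): applying a new policy to
 * [0x3000, 0x5000) inside a VMA spanning [0x1000, 0x9000), when no
 * merge with a neighbour is possible, splits the VMA at 0x3000 and
 * again at 0x5000, leaving three VMAs with only the middle one
 * carrying the new policy.
 */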

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */
void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
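
/*
 * Illustrative sketch (user space, not part of the kernel build): the
 * flag combinations handled above, via the <numaif.h> wrappers from
 * libnuma.  'addr' is assumed to point into an existing mapping.
 *
 *	int mode, node;
 *	unsigned long mask = 0;
 *
 *	// Which policy and nodemask govern this address?
 *	get_mempolicy(&mode, &mask, sizeof(mask) * 8, addr, MPOL_F_ADDR);
 *
 *	// On which node does the page backing addr currently live?
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 */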

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;
	struct vm_area_struct *vma;

	nodes_clear(nmask);
	node_set(source, nmask);

	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
							false, MIGRATE_SYNC);
		if (err)
			putback_lru_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory off of that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
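	/*
	 * Worked example (illustrative masks): from = {0,1}, to = {1,2},
	 * so 0->1 and 1->2.  The first scan skips <0,1> because dest 1
	 * is still a pending source, then settles on <1,2> whose dest is
	 * empty; node 1 is drained to node 2 first.  The second scan then
	 * picks <0,1>, moving pages onto node 1 only after its own
	 * outgoing pages have left.
	 */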

	tmp = *from_nodes;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {
			d = node_remap(s, *from_nodes, *to_nodes);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : -1);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		int nr_failed = 0;

		err = mbind_range(mm, start, end, new);

		if (!list_empty(&pagelist)) {
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						(unsigned long)vma,
						false, true);
			if (nr_failed)
				putback_lru_pages(&pagelist);
		}

		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* If the user specified more nodes than supported, just check
	   that the unsupported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
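
/*
 * Worked example of the mask arithmetic above (illustrative values,
 * assuming 64-bit longs): maxnode = 65 covers nodes 0-63 after the
 * decrement, so nlongs = 1 and endmask = ~0UL; maxnode = 33 covers
 * nodes 0-31, giving endmask = (1UL << 32) - 1, so any user bit above
 * node 31 in that long is masked off by the final store.
 */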

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process.  The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
		err = -EINVAL;
		goto out_put;
	}

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;
}
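
/*
 * Userspace sketch (illustrative): ask the kernel to move the pages of
 * process @pid from node 0 to node 1.  The caller needs CAP_SYS_NICE or
 * a matching uid, as checked above; on success the syscall returns the
 * number of pages that could not be moved.
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	long not_moved = syscall(SYS_migrate_pages, pid,
 *				 sizeof(old) * 8, &old, &new);
 *	if (not_moved < 0)
 *		perror("migrate_pages");
 */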

/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}
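
/*
 * Userspace sketch (illustrative): query which node backs the page at
 * address @p.  MPOL_F_NODE together with MPOL_F_ADDR makes the syscall
 * return the node id of @p's page in the first argument instead of the
 * policy mode.
 *
 *	int node = -1;
 *	if (syscall(SYS_get_mempolicy, &node, NULL, 0,
 *		    (unsigned long)p, MPOL_F_NODE | MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */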

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
				     compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode,
				     compat_ulong_t addr, compat_ulong_t flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
				     compat_ulong_t maxnode)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(bm, nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, bm, alloc_size))
			return -EFAULT;
	}

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
			     compat_ulong_t mode, compat_ulong_t __user *nmask,
			     compat_ulong_t maxnode, compat_ulong_t flags)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
			return -EFAULT;
	}

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma - virtual memory area whose policy is sought
 * @addr - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * The current or another task's mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
struct mempolicy *get_vma_policy(struct task_struct *task,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = task->mempolicy;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
									addr);
			if (vpol)
				pol = vpol;
		} else if (vma->vm_policy)
			pol = vma->vm_policy;
	}
	if (!pol)
		pol = &default_policy;
	return pol;
}
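
/*
 * Caller pattern sketch (assumed usage, mirroring alloc_pages_vma()
 * below): look the policy up, allocate against it, then drop the
 * conditional reference that shared policies carry.
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... allocate using pol ...
 *	mpol_cond_put(pol);	// no-op unless MPOL_F_SHARED
 */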

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			gfp_zone(gfp) >= policy_zone &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
	int nd)
{
	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node(nid, policy->v.nodes);
	if (next >= MAX_NUMNODES)
		next = first_node(policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected from freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task itself can change its policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return numa_node_id();

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone ? zone->node : numa_node_id();
	}

	default:
		BUG();
	}
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long off)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int c;
	int nid = -1;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)off % nnodes;
	c = 0;
	do {
		nid = next_node(nid, pol->v.nodes);
		c++;
	} while (c <= target);
	return nid;
}

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		 struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, vma, off);
	} else
		return interleave_nodes(pol);
}
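
/*
 * Worked example (illustrative numbers): with pol->v.nodes = {1,3,6}
 * and off = 10, nnodes = 3 and target = 10 % 3 = 1, so the do/while in
 * offset_il_node() steps past the first set node and returns 3, the
 * second one.  Consecutive page offsets therefore cycle 1, 3, 6, 1, ...
 */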

/*
 * Return the bit number of a random bit set in the nodemask.
 * (returns -1 if nodemask is empty)
 */
int node_random(const nodemask_t *maskp)
{
	int w, bit = -1;

	w = nodes_weight(*maskp);
	if (w)
		bit = bitmap_ord_to_pos(maskp->bits,
			get_random_int() % w, MAX_NUMNODES);
	return bit;
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by get_mems_allowed()
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
				gfp_t gfp_flags, struct mempolicy **mpol,
				nodemask_t **nodemask)
{
	struct zonelist *zl;

	*mpol = get_vma_policy(current, vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
				huge_page_shift(hstate_vma(vma))), gfp_flags);
	} else {
		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return zl;
}
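
/*
 * Caller sketch (assumed usage, modeled on the hugetlb allocator; the
 * htlb_alloc_mask name is that allocator's GFP mask and is only an
 * assumption here): the returned @mpol must be conditionally unreffed
 * once the allocation is done, and @nodemask fed to the zonelist walk
 * whenever it comes back non-NULL.
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl = huge_zonelist(vma, addr, htlb_alloc_mask,
 *					    &mpol, &nodemask);
 *	... walk zl, filtering with nodemask when set ...
 *	mpol_cond_put(mpol);
 */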

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.  Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;
	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (!mempolicy)
		goto out;

	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from, they may fallback to other nodes when oom.
		 * Thus, it's possible for tsk to have allocated memory from
		 * nodes in mask.
		 */
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		ret = nodes_intersects(mempolicy->v.nodes, *mask);
		break;
	default:
		BUG();
	}
out:
	task_unlock(tsk);
	return ret;
}

/*
 * Allocate a page in interleaved policy.
 * Own path because it needs to do special accounting.
 */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct zonelist *zl;
	struct page *page;

	zl = node_zonelist(nid, gfp);
	page = __alloc_pages(gfp, order, zl);
	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
	return page;
}

/**
 * alloc_pages_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation.
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 *
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation.  Must be inside the VMA.
 * @node: Which node to prefer for allocation (modulo policy).
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL the caller must hold down_read on the mmap_sem of
 * the mm_struct of the VMA to prevent it from going away.  Should be used
 * for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 *
 * Should be called with the mmap_sem of the vma held.
 */
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, int node)
{
	struct mempolicy *pol;
	struct zonelist *zl;
	struct page *page;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	pol = get_vma_policy(current, vma, addr);
	cpuset_mems_cookie = get_mems_allowed();

	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, order, nid);
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
			goto retry_cpuset;

		return page;
	}
	zl = policy_zonelist(gfp, pol, node);
	if (unlikely(mpol_needs_cond_ref(pol))) {
		/*
		 * slow path: ref counted shared policy
		 */
		struct page *page = __alloc_pages_nodemask(gfp, order,
						zl, policy_nodemask(gfp, pol));
		__mpol_put(pol);
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
			goto retry_cpuset;
		return page;
	}
	/*
	 * fast path: default or task policy
	 */
	page = __alloc_pages_nodemask(gfp, order, zl,
				      policy_nodemask(gfp, pol));
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;
}

/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system.
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool.  When not in
 * interrupt context, applies the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 *
 * Don't call cpuset_update_task_memory_state() unless
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = current->mempolicy;
	struct page *page;
	unsigned int cpuset_mems_cookie;

	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
		pol = &default_policy;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_zonelist(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;

	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that changes
 * the cpuset's mems), so we needn't do rebind work for the current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	rcu_read_lock();
	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		if (new->flags & MPOL_F_REBINDING)
			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
		else
			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
	}
	rcu_read_unlock();
	atomic_set(&new->refcnt, 1);
	return new;
}

/*
 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
 * eliminate the MPOL_F_* flags that require conditional ref and
 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
 * after return.  Use the returned value.
 *
 * Allows use of a mempolicy for, e.g., multiple allocations with a single
 * policy lookup, even if the policy needs/has an extra ref on lookup.
 * shmem_readahead needs this.
 */
struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
						struct mempolicy *frompol)
{
	if (!mpol_needs_cond_ref(frompol))
		return frompol;

	*tompol = *frompol;
	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
	__mpol_put(frompol);
	return tompol;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return !!nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock spinlock, which should be held
 * for any accesses to the tree.
 */

/* lookup first element intersecting start-end */
/* Caller holds sp->lock */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}
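
/*
 * Worked example (illustrative ranges): with nodes covering [0,4), [6,9)
 * and [9,12) in the tree, sp_lookup(sp, 7, 11) first descends to a node
 * intersecting [7,11) -- say [9,12) -- then the backward walk above
 * steps to [6,9), the lowest-starting node that still overlaps the
 * query, and returns it.
 */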

/* Insert a new shared policy into the list. */
/* Caller holds sp->lock */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	spin_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	spin_unlock(&sp->lock);
	return pol;
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);

	if (!n)
		return NULL;
	n->start = start;
	n->end = end;
	mpol_get(pol);
	pol->flags |= MPOL_F_SHARED;	/* for unref */
	n->policy = pol;
	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n, *new2 = NULL;

restart:
	spin_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!new2) {
					spin_unlock(&sp->lock);
					new2 = sp_alloc(end, n->end, n->policy);
					if (!new2)
						return -ENOMEM;
					goto restart;
				}
				n->end = start;
				sp_insert(sp, new2);
				new2 = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	spin_unlock(&sp->lock);
	if (new2) {
		mpol_put(new2->policy);
		kmem_cache_free(sn_cache, new2);
	}
	return 0;
}
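
/*
 * Worked example (illustrative ranges): replacing [4,6) when the tree
 * holds a single node [0,10) splits it -- the old node is truncated to
 * [0,4), a copy of its policy is inserted for [6,10) via new2, and the
 * new policy node then takes over [4,6).
 */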

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called at get_inode() time, so we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	spin_lock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : -1);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		kmem_cache_free(sn_cache, new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		rb_erase(&n->nd, &p->root);
		mpol_put(n->policy);
		kmem_cache_free(sn_cache, n);
	}
	spin_unlock(&p->lock);
}

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk("numa_policy_init: interleaving failed\n");
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is pseudo-policy: MPOL_PREFERRED with MPOL_F_LOCAL flag
 * Used only for mpol_parse_str() and mpol_to_str()
 */
#define MPOL_LOCAL MPOL_MAX
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local"
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 * @no_context: flag whether to "contextualize" the mempolicy
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * if @no_context is true, save the input nodemask in w.user_nodemask in
 * the returned mempolicy.  This will be used to "clone" the mempolicy in
 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
 * mount option.  Note that if 'static' or 'relative' mode flags were
 * specified, the input nodemask will already have been saved.  Saving
 * it again is redundant, but safe.
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short uninitialized_var(mode_flags);
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode <= MPOL_LOCAL; mode++)
		if (!strcmp(str, policy_modes[mode]))
			break;
	if (mode > MPOL_LOCAL)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_HIGH_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	if (no_context) {
		/* save for contextualization */
		new->w.user_nodemask = nodes;
	} else {
		int ret;
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			task_lock(current);
			ret = mpol_set_nodemask(new, &nodes, scratch);
			task_unlock(current);
		} else
			ret = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
		if (ret) {
			mpol_put(new);
			goto out;
		}
	}
	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
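
/*
 * Example inputs accepted by mpol_parse_str(), as seen in the tmpfs
 * "mpol=" mount option (node numbers illustrative):
 *
 *	"default"			no nodelist allowed
 *	"prefer:2"			single preferred node
 *	"bind=static:0-3"		nodelist required
 *	"interleave=relative:0,2"	relative to the task's cpuset
 *	"local"				pseudo-policy, no nodelist
 */
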
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 * @no_context: "context free" mempolicy - use nodemask in w.user_nodemask
 *
 * Convert a mempolicy into a string.
 * Returns the number of characters in buffer (if positive)
 * or an error (negative)
 */
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode;
	unsigned short flags = pol ? pol->flags : 0;

	/*
	 * Sanity check: room for longest mode, flag and some nodes
	 */
	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;
	else
		mode = pol->mode;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;	/* pseudo-policy */
		else
			node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		if (no_context)
			nodes = pol->w.user_nodemask;
		else
			nodes = pol->v.nodes;
		break;

	default:
		BUG();
	}

	l = strlen(policy_modes[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_modes[mode]);
	p += l;

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = ':';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}
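
/*
 * Usage sketch (illustrative; pol is a hypothetical policy pointer):
 * an interleave policy created with MPOL_F_STATIC_NODES over nodes 0-3
 * formats as "interleave=static:0-3"; the default policy formats as
 * "default".
 *
 *	char buf[64];
 *	if (mpol_to_str(buf, sizeof(buf), pol, 0) > 0)
 *		pr_debug("mempolicy: %s\n", buf);
 */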