/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
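
/*
 * Illustrative userspace sketch (added for documentation, not part of the
 * original file): selecting an interleave policy for the calling task,
 * assuming libnuma's <numaif.h> wrapper for the set_mempolicy() syscall.
 * The node numbers are made-up examples.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  8 * sizeof(nodemask)) != 0)
 *		perror("set_mempolicy");
 */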

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The first
	 * step sets all the new nodes, and the second step clears all the
	 * disallowed nodes. In this way, we can avoid ending up with no node
	 * to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the new nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
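
/*
 * Illustrative example (added for documentation, not part of the original
 * file): how the two-step rebind keeps a usable nodemask at all times.
 * Assume an MPOL_BIND policy currently on nodes {0,1} whose cpuset is
 * being moved to nodes {2,3}:
 *
 *	MPOL_REBIND_STEP1:  v.nodes |= remapped nodes  ->  {0,1,2,3}
 *	MPOL_REBIND_STEP2:  v.nodes  = remapped nodes  ->  {2,3}
 *
 * A lockless reader that samples the policy between the two steps still
 * sees at least one node it is allowed to allocate from.
 */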

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
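
/*
 * Worked example (added for documentation, not part of the original file):
 * with MPOL_F_RELATIVE_NODES, a user nodemask of {0,2,5} applied while
 * @rel is {4,5,6} (weight 3) is first folded modulo 3 to {0,2} and then
 * mapped onto the set bits of @rel, yielding {4,6}.
 */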

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the new nodes, and the second step clears all the
 * disallowed nodes. In this way, we can avoid ending up with no node
 * to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/*
 * Scan through pages, checking if they match the given conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;

	spin_lock(&vma->vm_mm->page_table_lock);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(&vma->vm_mm->page_table_lock);
#else
	BUG();
#endif
}

static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;
	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist
 * passed via @private.
 */
static struct vm_area_struct *
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;


	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = queue_pages_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
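
/*
 * Illustrative userspace sketch (added for documentation, not part of the
 * original file): querying which node backs a mapped address through
 * MPOL_F_NODE|MPOL_F_ADDR, assuming libnuma's <numaif.h> wrapper for the
 * get_mempolicy() syscall; "addr" stands for any address in a mapped page.
 *
 *	#include <numaif.h>
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */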

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from, to, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}
	/*
	 * queue_pages_range() confirms that @page belongs to some vma,
	 * so vma shouldn't be NULL.
	 */
	BUG_ON(!vma);

	if (PageHuge(page))
		return alloc_huge_page_noerr(vma, address, 1);
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);	/* maybe ... */
	if (!IS_ERR(vma))
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_vma_page,
					(unsigned long)vma,
					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}
1327
Christoph Lameter39743882006-01-08 01:00:51 -08001328/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001329 * User space interface with variable sized bitmaps for nodelists.
1330 */
1331
1332/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001333static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001334 unsigned long maxnode)
1335{
1336 unsigned long k;
1337 unsigned long nlongs;
1338 unsigned long endmask;
1339
1340 --maxnode;
1341 nodes_clear(*nodes);
1342 if (maxnode == 0 || !nmask)
1343 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001344 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001345 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001346
1347 nlongs = BITS_TO_LONGS(maxnode);
1348 if ((maxnode % BITS_PER_LONG) == 0)
1349 endmask = ~0UL;
1350 else
1351 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1352
1353	/* When the user specified more nodes than supported, just check
1354	   that the unsupported part is all zero. */
1355 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1356 if (nlongs > PAGE_SIZE/sizeof(long))
1357 return -EINVAL;
1358 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1359 unsigned long t;
1360 if (get_user(t, nmask + k))
1361 return -EFAULT;
1362 if (k == nlongs - 1) {
1363 if (t & endmask)
1364 return -EINVAL;
1365 } else if (t)
1366 return -EINVAL;
1367 }
1368 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1369 endmask = ~0UL;
1370 }
1371
1372 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1373 return -EFAULT;
1374 nodes_addr(*nodes)[nlongs-1] &= endmask;
1375 return 0;
1376}
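/*
 * For reference, the layout get_nodes() parses is simply an array of
 * unsigned longs with one bit per node, node N living in bit
 * N % BITS_PER_LONG of word N / BITS_PER_LONG.  Hedged illustrative
 * sketch of a caller-side mask selecting nodes 0 and 2 (not part of this
 * file):
 *
 *	unsigned long mask[1] = { 0 };
 *	mask[0] |= 1UL << 0;
 *	mask[0] |= 1UL << 2;
 *
 * and pass &mask[0] with maxnode = 8 * sizeof(mask) to mbind() or
 * set_mempolicy().
 */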
1377
1378/* Copy a kernel node mask to user space */
1379static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1380 nodemask_t *nodes)
1381{
1382 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1383 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1384
1385 if (copy > nbytes) {
1386 if (copy > PAGE_SIZE)
1387 return -EINVAL;
1388 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1389 return -EFAULT;
1390 copy = nbytes;
1391 }
1392 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1393}
1394
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001395SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1396 unsigned long, mode, unsigned long __user *, nmask,
1397 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001398{
1399 nodemask_t nodes;
1400 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001401 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001402
David Rientjes028fec42008-04-28 02:12:25 -07001403 mode_flags = mode & MPOL_MODE_FLAGS;
1404 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001405 if (mode >= MPOL_MAX)
1406 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001407 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1408 (mode_flags & MPOL_F_RELATIVE_NODES))
1409 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001410 err = get_nodes(&nodes, nmask, maxnode);
1411 if (err)
1412 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001413 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001414}
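/*
 * Illustrative userspace counterpart (hedged sketch, not part of this
 * file; assumes libnuma's <numaif.h> mbind() wrapper and that node 0 is
 * online): bind a fresh anonymous mapping to node 0 and migrate any
 * pages already faulted in.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;
 *	mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */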
1415
1416/* Set the process memory policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001417SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1418 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001419{
1420 int err;
1421 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001422 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001423
David Rientjes028fec42008-04-28 02:12:25 -07001424 flags = mode & MPOL_MODE_FLAGS;
1425 mode &= ~MPOL_MODE_FLAGS;
1426 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001427 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001428 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1429 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001430 err = get_nodes(&nodes, nmask, maxnode);
1431 if (err)
1432 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001433 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001434}
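/*
 * Illustrative userspace counterpart (hedged sketch, assuming libnuma's
 * <numaif.h> wrapper): interleave all further allocations of the calling
 * task across nodes 0 and 1.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  8 * sizeof(nodemask)) != 0)
 *		perror("set_mempolicy");
 */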
1435
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001436SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1437 const unsigned long __user *, old_nodes,
1438 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001439{
David Howellsc69e8d92008-11-14 10:39:19 +11001440 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001441 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001442 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001443 nodemask_t task_nodes;
1444 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001445 nodemask_t *old;
1446 nodemask_t *new;
1447 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001448
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001449 if (!scratch)
1450 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001451
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001452 old = &scratch->mask1;
1453 new = &scratch->mask2;
1454
1455 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001456 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001457 goto out;
1458
1459 err = get_nodes(new, new_nodes, maxnode);
1460 if (err)
1461 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001462
1463 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001464 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001465 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001466 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001467 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001468 err = -ESRCH;
1469 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001470 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001471 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001472
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001473 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001474
1475 /*
1476 * Check if this process has the right to modify the specified
1477 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001478 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001479 * userid as the target process.
1480 */
David Howellsc69e8d92008-11-14 10:39:19 +11001481 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001482 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1483 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001484 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001485 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001486 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001487 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001488 }
David Howellsc69e8d92008-11-14 10:39:19 +11001489 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001490
1491 task_nodes = cpuset_mems_allowed(task);
1492 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001493 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001494 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001495 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001496 }
1497
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08001498 if (!nodes_subset(*new, node_states[N_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001499 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001500 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001501 }
1502
David Quigley86c3a762006-06-23 02:04:02 -07001503 err = security_task_movememory(task);
1504 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001505 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001506
Christoph Lameter3268c632012-03-21 16:34:06 -07001507 mm = get_task_mm(task);
1508 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001509
1510 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001511 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001512 goto out;
1513 }
1514
1515 err = do_migrate_pages(mm, old, new,
1516 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001517
1518 mmput(mm);
1519out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001520 NODEMASK_SCRATCH_FREE(scratch);
1521
Christoph Lameter39743882006-01-08 01:00:51 -08001522 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001523
1524out_put:
1525 put_task_struct(task);
1526 goto out;
1527
Christoph Lameter39743882006-01-08 01:00:51 -08001528}
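/*
 * Illustrative userspace counterpart (hedged sketch, assuming libnuma's
 * <numaif.h> wrapper and that the caller passes the permission checks
 * above): move all of task pid's pages from node 0 to node 1.
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0;
 *	unsigned long to = 1UL << 1;
 *	long ret = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 */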
1529
1530
Christoph Lameter8bccd852005-10-29 18:16:59 -07001531/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001532SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1533 unsigned long __user *, nmask, unsigned long, maxnode,
1534 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001535{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001536 int err;
1537 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001538 nodemask_t nodes;
1539
1540 if (nmask != NULL && maxnode < MAX_NUMNODES)
1541 return -EINVAL;
1542
1543 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1544
1545 if (err)
1546 return err;
1547
1548 if (policy && put_user(pval, policy))
1549 return -EFAULT;
1550
1551 if (nmask)
1552 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1553
1554 return err;
1555}
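/*
 * Illustrative userspace counterpart (hedged sketch, assuming libnuma's
 * <numaif.h> wrapper; addr stands for any already-faulted address in the
 * caller's address space): query which node currently backs the page at
 * addr via the MPOL_F_NODE | MPOL_F_ADDR flags handled by
 * do_get_mempolicy().
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */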
1556
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557#ifdef CONFIG_COMPAT
1558
1559asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1560 compat_ulong_t __user *nmask,
1561 compat_ulong_t maxnode,
1562 compat_ulong_t addr, compat_ulong_t flags)
1563{
1564 long err;
1565 unsigned long __user *nm = NULL;
1566 unsigned long nr_bits, alloc_size;
1567 DECLARE_BITMAP(bm, MAX_NUMNODES);
1568
1569 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1570 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1571
1572 if (nmask)
1573 nm = compat_alloc_user_space(alloc_size);
1574
1575 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1576
1577 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001578 unsigned long copy_size;
1579 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1580 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 /* ensure entire bitmap is zeroed */
1582 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1583 err |= compat_put_bitmap(nmask, bm, nr_bits);
1584 }
1585
1586 return err;
1587}
1588
1589asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1590 compat_ulong_t maxnode)
1591{
1592 long err = 0;
1593 unsigned long __user *nm = NULL;
1594 unsigned long nr_bits, alloc_size;
1595 DECLARE_BITMAP(bm, MAX_NUMNODES);
1596
1597 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1598 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1599
1600 if (nmask) {
1601 err = compat_get_bitmap(bm, nmask, nr_bits);
1602 nm = compat_alloc_user_space(alloc_size);
1603 err |= copy_to_user(nm, bm, alloc_size);
1604 }
1605
1606 if (err)
1607 return -EFAULT;
1608
1609 return sys_set_mempolicy(mode, nm, nr_bits+1);
1610}
1611
1612asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1613 compat_ulong_t mode, compat_ulong_t __user *nmask,
1614 compat_ulong_t maxnode, compat_ulong_t flags)
1615{
1616 long err = 0;
1617 unsigned long __user *nm = NULL;
1618 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001619 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
1621 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1622 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1623
1624 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001625 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001627 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 }
1629
1630 if (err)
1631 return -EFAULT;
1632
1633 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1634}
1635
1636#endif
1637
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001638/*
1639 * get_vma_policy(@task, @vma, @addr)
1640 * @task - task for fallback if vma policy == default
1641 * @vma - virtual memory area whose policy is sought
1642 * @addr - address in @vma for shared policy lookup
1643 *
1644 * Returns effective policy for a VMA at specified address.
1645 * Falls back to @task or system default policy, as necessary.
David Rientjes32f85162012-10-16 17:31:23 -07001646 * Current or other task's task mempolicy and non-shared vma policies must be
1647 * protected by task_lock(task) by the caller.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001648 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1649 * count--added by the get_policy() vm_op, as appropriate--to protect against
1650 * freeing by another task. It is the caller's responsibility to free the
1651 * extra reference for shared policies.
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001652 */
Stephen Wilsond98f6cb2011-05-24 17:12:41 -07001653struct mempolicy *get_vma_policy(struct task_struct *task,
Christoph Lameter48fce342006-01-08 01:01:03 -08001654 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655{
Mel Gorman5606e382012-11-02 18:19:13 +00001656 struct mempolicy *pol = get_task_policy(task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657
1658 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001659 if (vma->vm_ops && vma->vm_ops->get_policy) {
Lee Schermerhornae4d8c12008-04-28 02:13:11 -07001660 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1661 addr);
1662 if (vpol)
1663 pol = vpol;
Mel Gorman00442ad2012-10-08 16:29:20 -07001664 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001666
1667 /*
1668 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1669 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1670 * count on these policies which will be dropped by
1671 * mpol_cond_put() later
1672 */
1673 if (mpol_needs_cond_ref(pol))
1674 mpol_get(pol);
1675 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 }
1677 if (!pol)
1678 pol = &default_policy;
1679 return pol;
1680}
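/*
 * Hedged usage sketch (mirrors callers elsewhere in this file): the
 * lookup is paired with mpol_cond_put() so that the extra reference
 * taken for shared policies is dropped again.
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... consult pol (mode, flags, nodemask) ...
 *	mpol_cond_put(pol);
 */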
1681
Mel Gormanfc3147242013-10-07 11:29:09 +01001682bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
1683{
1684 struct mempolicy *pol = get_task_policy(task);
1685 if (vma) {
1686 if (vma->vm_ops && vma->vm_ops->get_policy) {
1687 bool ret = false;
1688
1689 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1690 if (pol && (pol->flags & MPOL_F_MOF))
1691 ret = true;
1692 mpol_cond_put(pol);
1693
1694 return ret;
1695 } else if (vma->vm_policy) {
1696 pol = vma->vm_policy;
1697 }
1698 }
1699
1700 if (!pol)
1701 return default_policy.flags & MPOL_F_MOF;
1702
1703 return pol->flags & MPOL_F_MOF;
1704}
1705
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001706static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1707{
1708 enum zone_type dynamic_policy_zone = policy_zone;
1709
1710 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1711
1712 /*
1713 * if policy->v.nodes has movable memory only,
1714 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1715 *
1716	 * policy->v.nodes intersects with node_states[N_MEMORY],
1717	 * so if the following test fails, it implies
1718 * policy->v.nodes has movable memory only.
1719 */
1720 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1721 dynamic_policy_zone = ZONE_MOVABLE;
1722
1723 return zone >= dynamic_policy_zone;
1724}
1725
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001726/*
1727 * Return a nodemask representing a mempolicy for filtering nodes for
1728 * page allocation
1729 */
1730static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001731{
1732 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001733 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001734 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001735 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1736 return &policy->v.nodes;
1737
1738 return NULL;
1739}
1740
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001741/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001742static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1743 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001745 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001747 if (!(policy->flags & MPOL_F_LOCAL))
1748 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 break;
1750 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001751 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001752 * Normally, MPOL_BIND allocations are node-local within the
1753 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001754 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001755 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001756 */
Mel Gorman19770b32008-04-28 02:12:18 -07001757 if (unlikely(gfp & __GFP_THISNODE) &&
1758 unlikely(!node_isset(nd, policy->v.nodes)))
1759 nd = first_node(policy->v.nodes);
1760 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 BUG();
1763 }
Mel Gorman0e884602008-04-28 02:12:14 -07001764 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765}
1766
1767/* Do dynamic interleaving for a process */
1768static unsigned interleave_nodes(struct mempolicy *policy)
1769{
1770 unsigned nid, next;
1771 struct task_struct *me = current;
1772
1773 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001774 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001776 next = first_node(policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001777 if (next < MAX_NUMNODES)
1778 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 return nid;
1780}
1781
Christoph Lameterdc85da12006-01-18 17:42:36 -08001782/*
1783 * Depending on the memory policy provide a node from which to allocate the
1784 * next slab entry.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001785 * @policy must be protected from freeing by the caller.  If @policy is
1786 * the current task's mempolicy, this protection is implicit, as only the
1787 * task can change its policy.  The system default policy requires no
1788 * such protection.
Christoph Lameterdc85da12006-01-18 17:42:36 -08001789 */
Andi Kleene7b691b2012-06-09 02:40:03 -07001790unsigned slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001791{
Andi Kleene7b691b2012-06-09 02:40:03 -07001792 struct mempolicy *policy;
1793
1794 if (in_interrupt())
1795 return numa_node_id();
1796
1797 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001798 if (!policy || policy->flags & MPOL_F_LOCAL)
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001799 return numa_node_id();
Christoph Lameter765c4502006-09-27 01:50:08 -07001800
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001801 switch (policy->mode) {
1802 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001803 /*
1804 * handled MPOL_F_LOCAL above
1805 */
1806 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001807
Christoph Lameterdc85da12006-01-18 17:42:36 -08001808 case MPOL_INTERLEAVE:
1809 return interleave_nodes(policy);
1810
Mel Gormandd1a2392008-04-28 02:12:17 -07001811 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001812 /*
1813 * Follow bind policy behavior and start allocation at the
1814 * first node.
1815 */
Mel Gorman19770b32008-04-28 02:12:18 -07001816 struct zonelist *zonelist;
1817 struct zone *zone;
1818 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1819 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1820 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1821 &policy->v.nodes,
1822 &zone);
Eric Dumazet800416f2010-10-27 19:33:43 +02001823 return zone ? zone->node : numa_node_id();
Mel Gormandd1a2392008-04-28 02:12:17 -07001824 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001825
Christoph Lameterdc85da12006-01-18 17:42:36 -08001826 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001827 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001828 }
1829}
1830
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831/* Do static interleaving for a VMA with known offset. */
1832static unsigned offset_il_node(struct mempolicy *pol,
1833 struct vm_area_struct *vma, unsigned long off)
1834{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001835 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001836 unsigned target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 int c;
1838 int nid = -1;
1839
David Rientjesf5b087b2008-04-28 02:12:27 -07001840 if (!nnodes)
1841 return numa_node_id();
1842 target = (unsigned int)off % nnodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 c = 0;
1844 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001845 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 c++;
1847 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 return nid;
1849}
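/*
 * Worked example (values invented for illustration): with
 * pol->v.nodes = {1,3,6}, nnodes = 3 and off = 7, target = 7 % 3 = 1 and
 * the loop steps past node 1 (c = 1) to node 3 (c = 2 > target), so
 * offset 7 maps to node 3.  The result depends only on the offset, hence
 * "static" interleaving.
 */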
1850
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001851/* Determine a node number for interleave */
1852static inline unsigned interleave_nid(struct mempolicy *pol,
1853 struct vm_area_struct *vma, unsigned long addr, int shift)
1854{
1855 if (vma) {
1856 unsigned long off;
1857
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001858 /*
1859 * for small pages, there is no difference between
1860 * shift and PAGE_SHIFT, so the bit-shift is safe.
1861 * for huge pages, since vm_pgoff is in units of small
1862 * pages, we need to shift off the always 0 bits to get
1863 * a useful offset.
1864 */
1865 BUG_ON(shift < PAGE_SHIFT);
1866 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001867 off += (addr - vma->vm_start) >> shift;
1868 return offset_il_node(pol, vma, off);
1869 } else
1870 return interleave_nodes(pol);
1871}
1872
Michal Hocko778d3b02011-07-26 16:08:30 -07001873/*
1874 * Return the bit number of a random bit set in the nodemask.
1875 * (returns -1 if nodemask is empty)
1876 */
1877int node_random(const nodemask_t *maskp)
1878{
1879 int w, bit = -1;
1880
1881 w = nodes_weight(*maskp);
1882 if (w)
1883 bit = bitmap_ord_to_pos(maskp->bits,
1884 get_random_int() % w, MAX_NUMNODES);
1885 return bit;
1886}
1887
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001888#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001889/*
1890 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1891 * @vma = virtual memory area whose policy is sought
1892 * @addr = address in @vma for shared policy lookup and interleave policy
1893 * @gfp_flags = for requested zone
Mel Gorman19770b32008-04-28 02:12:18 -07001894 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1895 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001896 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001897 * Returns a zonelist suitable for a huge page allocation and a pointer
1898 * to the struct mempolicy for conditional unref after allocation.
1899 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1900 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001901 *
1902 * Must be protected by get_mems_allowed()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001903 */
Mel Gorman396faf02007-07-17 04:03:13 -07001904struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001905 gfp_t gfp_flags, struct mempolicy **mpol,
1906 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001907{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001908 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001909
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001910 *mpol = get_vma_policy(current, vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001911 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001912
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001913 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1914 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001915 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001916 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001917 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001918 if ((*mpol)->mode == MPOL_BIND)
1919 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001920 }
1921 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001922}
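/*
 * Hedged in-kernel usage sketch, loosely modeled on the hugetlb fault
 * path (names may differ in the real caller): the returned *mpol must be
 * released with mpol_cond_put() once the zonelist walk is done.
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl = huge_zonelist(vma, addr, GFP_HIGHUSER_MOVABLE,
 *					    &mpol, &nodemask);
 *	... walk zl, filtering with nodemask when it is non-NULL ...
 *	mpol_cond_put(mpol);
 */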
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001923
1924/*
1925 * init_nodemask_of_mempolicy
1926 *
1927 * If the current task's mempolicy is "default" [NULL], return 'false'
1928 * to indicate default policy. Otherwise, extract the policy nodemask
1929 * for 'bind' or 'interleave' policy into the argument nodemask, or
1930 * initialize the argument nodemask to contain the single node for
1931 * 'preferred' or 'local' policy and return 'true' to indicate presence
1932 * of non-default mempolicy.
1933 *
1934 * We don't bother with reference counting the mempolicy [mpol_get/put]
1935	 * because the current task is examining its own mempolicy and a task's
1936 * mempolicy is only ever changed by the task itself.
1937 *
1938 * N.B., it is the caller's responsibility to free a returned nodemask.
1939 */
1940bool init_nodemask_of_mempolicy(nodemask_t *mask)
1941{
1942 struct mempolicy *mempolicy;
1943 int nid;
1944
1945 if (!(mask && current->mempolicy))
1946 return false;
1947
Miao Xiec0ff7452010-05-24 14:32:08 -07001948 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001949 mempolicy = current->mempolicy;
1950 switch (mempolicy->mode) {
1951 case MPOL_PREFERRED:
1952 if (mempolicy->flags & MPOL_F_LOCAL)
1953 nid = numa_node_id();
1954 else
1955 nid = mempolicy->v.preferred_node;
1956 init_nodemask_of_node(mask, nid);
1957 break;
1958
1959 case MPOL_BIND:
1960 /* Fall through */
1961 case MPOL_INTERLEAVE:
1962 *mask = mempolicy->v.nodes;
1963 break;
1964
1965 default:
1966 BUG();
1967 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001968 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001969
1970 return true;
1971}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001972#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001973
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001974/*
1975 * mempolicy_nodemask_intersects
1976 *
1977 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1978 * policy. Otherwise, check for intersection between mask and the policy
1979	 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1980 * policy, always return true since it may allocate elsewhere on fallback.
1981 *
1982 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1983 */
1984bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1985 const nodemask_t *mask)
1986{
1987 struct mempolicy *mempolicy;
1988 bool ret = true;
1989
1990 if (!mask)
1991 return ret;
1992 task_lock(tsk);
1993 mempolicy = tsk->mempolicy;
1994 if (!mempolicy)
1995 goto out;
1996
1997 switch (mempolicy->mode) {
1998 case MPOL_PREFERRED:
1999 /*
2000	 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
2001	 * allocate from; the task may fall back to other nodes when OOM.
2002 * Thus, it's possible for tsk to have allocated memory from
2003 * nodes in mask.
2004 */
2005 break;
2006 case MPOL_BIND:
2007 case MPOL_INTERLEAVE:
2008 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2009 break;
2010 default:
2011 BUG();
2012 }
2013out:
2014 task_unlock(tsk);
2015 return ret;
2016}
2017
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018/* Allocate a page in interleaved policy.
2019 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07002020static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2021 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022{
2023 struct zonelist *zl;
2024 struct page *page;
2025
Mel Gorman0e884602008-04-28 02:12:14 -07002026 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07002028 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07002029 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030 return page;
2031}
2032
2033/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002034 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 *
2036 * @gfp:
2037 * %GFP_USER user allocation.
2038 * %GFP_KERNEL kernel allocations,
2039 * %GFP_HIGHMEM highmem/user allocations,
2040 * %GFP_FS allocation should not call back into a file system.
2041 * %GFP_ATOMIC don't sleep.
2042 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002043 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 * @vma: Pointer to VMA or NULL if not available.
2045 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2046 *
2047 * This function allocates a page from the kernel page pool and applies
2048 * a NUMA policy associated with the VMA or the current process.
2049 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2050 * mm_struct of the VMA to prevent it from going away. Should be used for
2051 * all allocations for pages that will be mapped into
2052 * user space. Returns NULL when no page can be allocated.
2053 *
2054 *	Should be called with the mm_sem of the vma held.
2055 */
2056struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002057alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Andi Kleen2f5f9482011-03-04 17:36:29 -08002058 unsigned long addr, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002060 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002061 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002062 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
Mel Gormancc9a6c82012-03-21 16:34:11 -07002064retry_cpuset:
2065 pol = get_vma_policy(current, vma, addr);
2066 cpuset_mems_cookie = get_mems_allowed();
2067
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002068 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002070
Andi Kleen8eac5632011-02-25 14:44:28 -08002071 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002072 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002073 page = alloc_page_interleave(gfp, order, nid);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002074 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2075 goto retry_cpuset;
2076
Miao Xiec0ff7452010-05-24 14:32:08 -07002077 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 }
David Rientjes212a0a62012-12-11 16:02:51 -08002079 page = __alloc_pages_nodemask(gfp, order,
2080 policy_zonelist(gfp, pol, node),
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002081 policy_nodemask(gfp, pol));
David Rientjes212a0a62012-12-11 16:02:51 -08002082 if (unlikely(mpol_needs_cond_ref(pol)))
2083 __mpol_put(pol);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002084 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2085 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002086 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087}
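/*
 * Hedged usage sketch (modeled on typical callers such as the anonymous
 * fault path, not copied from them verbatim): with mmap_sem held for read
 * on vma->vm_mm,
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id());
 *
 * allocates a single movable user page obeying the VMA's policy.
 */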
2088
2089/**
2090 * alloc_pages_current - Allocate pages.
2091 *
2092 * @gfp:
2093 * %GFP_USER user allocation,
2094 * %GFP_KERNEL kernel allocation,
2095 * %GFP_HIGHMEM highmem allocation,
2096 * %GFP_FS don't call back into a file system.
2097 * %GFP_ATOMIC don't sleep.
2098 * @order: Power of two of allocation size in pages. 0 is a single page.
2099 *
2100 *	Allocate a page from the kernel page pool.  When not in
2101 *	interrupt context, apply the current process' NUMA policy.
2102 * Returns NULL when no page can be allocated.
2103 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002104 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 * 1) it's ok to take cpuset_sem (can WAIT), and
2106 * 2) allocating for current task (not interrupt).
2107 */
Al Virodd0fc662005-10-07 07:46:04 +01002108struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
Mel Gorman5606e382012-11-02 18:19:13 +00002110 struct mempolicy *pol = get_task_policy(current);
Miao Xiec0ff7452010-05-24 14:32:08 -07002111 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002112 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Christoph Lameter9b819d22006-09-25 23:31:40 -07002114 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 pol = &default_policy;
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002116
Mel Gormancc9a6c82012-03-21 16:34:11 -07002117retry_cpuset:
2118 cpuset_mems_cookie = get_mems_allowed();
2119
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002120 /*
2121 * No reference counting needed for current->mempolicy
2122 * nor system default_policy
2123 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002124 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002125 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2126 else
2127 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002128 policy_zonelist(gfp, pol, numa_node_id()),
2129 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002130
2131 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2132 goto retry_cpuset;
2133
Miao Xiec0ff7452010-05-24 14:32:08 -07002134 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135}
2136EXPORT_SYMBOL(alloc_pages_current);
2137
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002138int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2139{
2140 struct mempolicy *pol = mpol_dup(vma_policy(src));
2141
2142 if (IS_ERR(pol))
2143 return PTR_ERR(pol);
2144 dst->vm_policy = pol;
2145 return 0;
2146}
2147
Paul Jackson42253992006-01-08 01:01:59 -08002148/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002149 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002150 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2151 * with the mems_allowed returned by cpuset_mems_allowed(). This
2152 * keeps mempolicies cpuset relative after its cpuset moves. See
2153 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002154 *
2155 * current's mempolicy may be rebound by the other task (the task that changes
2156 * cpuset's mems), so we needn't do rebind work for current task.
Paul Jackson42253992006-01-08 01:01:59 -08002157 */
Paul Jackson42253992006-01-08 01:01:59 -08002158
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002159/* Slow path of a mempolicy duplicate */
2160struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161{
2162 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2163
2164 if (!new)
2165 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002166
2167 /* task's mempolicy is protected by alloc_lock */
2168 if (old == current->mempolicy) {
2169 task_lock(current);
2170 *new = *old;
2171 task_unlock(current);
2172 } else
2173 *new = *old;
2174
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002175 rcu_read_lock();
Paul Jackson42253992006-01-08 01:01:59 -08002176 if (current_cpuset_is_being_rebound()) {
2177 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002178 if (new->flags & MPOL_F_REBINDING)
2179 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2180 else
2181 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002182 }
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002183 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 return new;
2186}
2187
2188/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002189bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190{
2191 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002192 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002193 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002194 return false;
Bob Liu19800502010-05-24 14:32:01 -07002195 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002196 return false;
Bob Liu19800502010-05-24 14:32:01 -07002197 if (mpol_store_user_nodemask(a))
2198 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002199 return false;
Bob Liu19800502010-05-24 14:32:01 -07002200
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002201 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002202 case MPOL_BIND:
2203 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002205 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002207 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 default:
2209 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002210 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 }
2212}
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 * Shared memory backing store policy support.
2216 *
2217 * Remember policies even when nobody has shared memory mapped.
2218 * The policies are kept in a red-black tree linked from the inode.
2219 * They are protected by the sp->lock spinlock, which should be held
2220 * for any accesses to the tree.
2221 */
2222
2223/* lookup first element intersecting start-end */
Mel Gorman42288fe2012-12-21 23:10:25 +00002224/* Caller holds sp->lock */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225static struct sp_node *
2226sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2227{
2228 struct rb_node *n = sp->root.rb_node;
2229
2230 while (n) {
2231 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2232
2233 if (start >= p->end)
2234 n = n->rb_right;
2235 else if (end <= p->start)
2236 n = n->rb_left;
2237 else
2238 break;
2239 }
2240 if (!n)
2241 return NULL;
2242 for (;;) {
2243 struct sp_node *w = NULL;
2244 struct rb_node *prev = rb_prev(n);
2245 if (!prev)
2246 break;
2247 w = rb_entry(prev, struct sp_node, nd);
2248 if (w->end <= start)
2249 break;
2250 n = prev;
2251 }
2252 return rb_entry(n, struct sp_node, nd);
2253}
2254
2255/* Insert a new shared policy into the list. */
2256/* Caller holds sp->lock */
2257static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2258{
2259 struct rb_node **p = &sp->root.rb_node;
2260 struct rb_node *parent = NULL;
2261 struct sp_node *nd;
2262
2263 while (*p) {
2264 parent = *p;
2265 nd = rb_entry(parent, struct sp_node, nd);
2266 if (new->start < nd->start)
2267 p = &(*p)->rb_left;
2268 else if (new->end > nd->end)
2269 p = &(*p)->rb_right;
2270 else
2271 BUG();
2272 }
2273 rb_link_node(&new->nd, parent, p);
2274 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002275 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002276 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277}
2278
2279/* Find shared policy intersecting idx */
2280struct mempolicy *
2281mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2282{
2283 struct mempolicy *pol = NULL;
2284 struct sp_node *sn;
2285
2286 if (!sp->root.rb_node)
2287 return NULL;
Mel Gorman42288fe2012-12-21 23:10:25 +00002288 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 sn = sp_lookup(sp, idx, idx+1);
2290 if (sn) {
2291 mpol_get(sn->policy);
2292 pol = sn->policy;
2293 }
Mel Gorman42288fe2012-12-21 23:10:25 +00002294 spin_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 return pol;
2296}
2297
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002298static void sp_free(struct sp_node *n)
2299{
2300 mpol_put(n->policy);
2301 kmem_cache_free(sn_cache, n);
2302}
2303
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002304/**
2305 * mpol_misplaced - check whether current page node is valid in policy
2306 *
2307 * @page - page to be checked
2308 * @vma - vm area where page mapped
2309 * @addr - virtual address where page mapped
2310 *
2311 * Lookup current policy node id for vma,addr and "compare to" page's
2312 * node id.
2313 *
2314 * Returns:
2315 * -1 - not misplaced, page is in the right node
2316 * node - node id where the page should be
2317 *
2318 * Policy determination "mimics" alloc_page_vma().
2319 * Called from fault path where we know the vma and faulting address.
2320 */
2321int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2322{
2323 struct mempolicy *pol;
2324 struct zone *zone;
2325 int curnid = page_to_nid(page);
2326 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002327 int thiscpu = raw_smp_processor_id();
2328 int thisnid = cpu_to_node(thiscpu);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002329 int polnid = -1;
2330 int ret = -1;
2331
2332 BUG_ON(!vma);
2333
2334 pol = get_vma_policy(current, vma, addr);
2335 if (!(pol->flags & MPOL_F_MOF))
2336 goto out;
2337
2338 switch (pol->mode) {
2339 case MPOL_INTERLEAVE:
2340 BUG_ON(addr >= vma->vm_end);
2341 BUG_ON(addr < vma->vm_start);
2342
2343 pgoff = vma->vm_pgoff;
2344 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2345 polnid = offset_il_node(pol, vma, pgoff);
2346 break;
2347
2348 case MPOL_PREFERRED:
2349 if (pol->flags & MPOL_F_LOCAL)
2350 polnid = numa_node_id();
2351 else
2352 polnid = pol->v.preferred_node;
2353 break;
2354
2355 case MPOL_BIND:
2356 /*
2357 * allows binding to multiple nodes.
2358 * use current page if in policy nodemask,
2359 * else select nearest allowed node, if any.
2360 * If no allowed nodes, use current [!misplaced].
2361 */
2362 if (node_isset(curnid, pol->v.nodes))
2363 goto out;
2364 (void)first_zones_zonelist(
2365 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2366 gfp_zone(GFP_HIGHUSER),
2367 &pol->v.nodes, &zone);
2368 polnid = zone->node;
2369 break;
2370
2371 default:
2372 BUG();
2373 }
Mel Gorman5606e382012-11-02 18:19:13 +00002374
2375 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002376 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002377 int last_cpupid;
2378 int this_cpupid;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002379
Peter Zijlstra90572892013-10-07 11:29:20 +01002380 polnid = thisnid;
2381 this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
Mel Gorman5606e382012-11-02 18:19:13 +00002382
Mel Gormane42c8ff2012-11-12 09:17:07 +00002383 /*
2384 * Multi-stage node selection is used in conjunction
2385 * with a periodic migration fault to build a temporal
2386 * task<->page relation. By using a two-stage filter we
2387 * remove short/unlikely relations.
2388 *
2389 * Using P(p) ~ n_p / n_t as per frequentist
2390 * probability, we can equate a task's usage of a
2391 * particular page (n_p) per total usage of this
2392 * page (n_t) (in a given time-span) to a probability.
2393 *
2394 * Our periodic faults will sample this probability and
2395 * getting the same result twice in a row, given these
2396 * samples are fully independent, is then given by
2397 * P(n)^2, provided our sample period is sufficiently
2398 * short compared to the usage pattern.
2399 *
2400	 * This quadratic squishes small probabilities, making
2401 * it less likely we act on an unlikely task<->page
2402 * relation.
2403 */
Peter Zijlstra90572892013-10-07 11:29:20 +01002404 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
2405 if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid)
Mel Gormane42c8ff2012-11-12 09:17:07 +00002406 goto out;
Rik van Riel6fe6b2d2013-10-07 11:29:08 +01002407
2408#ifdef CONFIG_NUMA_BALANCING
2409 /*
2410 * If the scheduler has just moved us away from our
2411 * preferred node, do not bother migrating pages yet.
2412 * This way a short and temporary process migration will
2413 * not cause excessive memory migration.
2414 */
Peter Zijlstra90572892013-10-07 11:29:20 +01002415 if (thisnid != current->numa_preferred_nid &&
Rik van Riel6fe6b2d2013-10-07 11:29:08 +01002416 !current->numa_migrate_seq)
2417 goto out;
2418#endif
Mel Gormane42c8ff2012-11-12 09:17:07 +00002419 }
2420
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002421 if (curnid != polnid)
2422 ret = polnid;
2423out:
2424 mpol_cond_put(pol);
2425
2426 return ret;
2427}
2428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2430{
Paul Mundt140d5a42007-07-15 23:38:16 -07002431	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002433 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434}
2435
Mel Gorman42288fe2012-12-21 23:10:25 +00002436static void sp_node_init(struct sp_node *node, unsigned long start,
2437 unsigned long end, struct mempolicy *pol)
2438{
2439 node->start = start;
2440 node->end = end;
2441 node->policy = pol;
2442}
2443
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002444static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2445 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446{
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002447 struct sp_node *n;
2448 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002450 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 if (!n)
2452 return NULL;
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002453
2454 newpol = mpol_dup(pol);
2455 if (IS_ERR(newpol)) {
2456 kmem_cache_free(sn_cache, n);
2457 return NULL;
2458 }
2459 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002460 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002461
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 return n;
2463}
2464
2465/* Replace a policy range. */
2466static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2467 unsigned long end, struct sp_node *new)
2468{
Mel Gormanb22d1272012-10-08 16:29:17 -07002469 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002470 struct sp_node *n_new = NULL;
2471 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002472 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473
Mel Gorman42288fe2012-12-21 23:10:25 +00002474restart:
2475 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 n = sp_lookup(sp, start, end);
2477 /* Take care of old policies in the same range. */
2478 while (n && n->start < end) {
2479 struct rb_node *next = rb_next(&n->nd);
2480 if (n->start >= start) {
2481 if (n->end <= end)
2482 sp_delete(sp, n);
2483 else
2484 n->start = end;
2485 } else {
2486 /* Old policy spanning whole new range. */
2487 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002488 if (!n_new)
2489 goto alloc_new;
2490
2491 *mpol_new = *n->policy;
2492 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002493 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002495 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002496 n_new = NULL;
2497 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 break;
2499 } else
2500 n->end = start;
2501 }
2502 if (!next)
2503 break;
2504 n = rb_entry(next, struct sp_node, nd);
2505 }
2506 if (new)
2507 sp_insert(sp, new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002508 spin_unlock(&sp->lock);
2509 ret = 0;
2510
2511err_out:
2512 if (mpol_new)
2513 mpol_put(mpol_new);
2514 if (n_new)
2515 kmem_cache_free(sn_cache, n_new);
2516
Mel Gormanb22d1272012-10-08 16:29:17 -07002517 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002518
2519alloc_new:
2520 spin_unlock(&sp->lock);
2521 ret = -ENOMEM;
2522 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2523 if (!n_new)
2524 goto err_out;
2525 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2526 if (!mpol_new)
2527 goto err_out;
2528 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529}
2530
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002531/**
2532 * mpol_shared_policy_init - initialize shared policy for inode
2533 * @sp: pointer to inode shared policy
2534 * @mpol: struct mempolicy to install
2535 *
2536 * Install non-NULL @mpol in inode's shared policy rb-tree.
2537 * On entry, the current task has a reference on a non-NULL @mpol.
2538 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002539 * This is called at get_inode() time, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002540 */
2541void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002542{
Miao Xie58568d22009-06-16 15:31:49 -07002543 int ret;
2544
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002545 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Mel Gorman42288fe2012-12-21 23:10:25 +00002546 spin_lock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002547
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002548 if (mpol) {
2549 struct vm_area_struct pvma;
2550 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002551 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002552
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002553 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002554 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002555 /* contextualize the tmpfs mount point mempolicy */
2556 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002557 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002558 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002559
2560 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002561 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002562 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002563 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002564 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002565
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002566 /* Create pseudo-vma that contains just the policy */
2567 memset(&pvma, 0, sizeof(struct vm_area_struct));
2568 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2569 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002570
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002571put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002572 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002573free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002574 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002575put_mpol:
2576 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002577 }
2578}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}
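
/*
 * Illustrative sketch only (kept out of the build): mpol_set_shared_policy()
 * is normally reached through a shared mapping's vm_operations_struct
 * ->set_policy hook, e.g. when mbind() is applied to a tmpfs mapping
 * (see mm/shmem.c). The helper name below is made up.
 */
#if 0
static int example_set_policy(struct vm_area_struct *vma,
			      struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);

	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}
#endif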

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	spin_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	spin_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static bool __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	if (nr_node_ids > 1 && !numabalancing_override) {
		printk(KERN_INFO "%s automatic NUMA balancing. "
			"Configure with numa_balancing= or the "
			"kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	numabalancing_override = true;

	if (!strcmp(str, "enable")) {
		set_numabalancing_state(true);
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		set_numabalancing_state(false);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING "Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
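
/*
 * Boot command line examples for the parser above: "numa_balancing=disable"
 * keeps automatic NUMA balancing off even when
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is set, "numa_balancing=enable"
 * forces it on, and anything else only triggers the warning.
 */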
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);
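
	/*
	 * Worked example (illustrative sizes): with node 0 at 4GB and node 1
	 * at only 8MB present, node 1 fails the 16MB cut-off above, so
	 * interleave_nodes ends up as {0} and early allocations stay on
	 * node 0; with >= 16MB on every node, system init interleaves
	 * across all of them.
	 */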

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		printk(KERN_ERR "numa_policy_init: interleaving failed\n");

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
};


#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode < MPOL_MAX; mode++) {
		if (!strcmp(str, policy_modes[mode])) {
			break;
		}
	}
	if (mode >= MPOL_MAX)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
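
/*
 * Minimal usage sketch, added here purely for illustration (the helper name
 * and the example string are made up; only the calls are real): parse a
 * tmpfs "mpol=" mount option and format it back the way shmem does when
 * showing mount options.
 */
static void __maybe_unused mpol_parse_str_example(void)
{
	char buf[64];
	char str[] = "interleave=relative:0-3";	/* parser writes NULs into it */
	struct mempolicy *pol;

	if (!mpol_parse_str(str, &pol)) {		/* 0 means success */
		mpol_to_str(buf, sizeof(buf), pol);	/* -> "interleave=relative:0-3" */
		mpol_put(pol);				/* drop the parse reference */
	}
}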
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert a mempolicy into a string.
 * Returns the number of characters in @buffer (if positive)
 * or a negative error code.
 */
int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	int l;
	nodemask_t nodes;
	unsigned short mode;
	unsigned short flags = pol ? pol->flags : 0;

	/*
	 * Sanity check: room for longest mode, flag and some nodes
	 */
	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);

	if (!pol || pol == &default_policy)
		mode = MPOL_DEFAULT;
	else
		mode = pol->mode;

	switch (mode) {
	case MPOL_DEFAULT:
		nodes_clear(nodes);
		break;

	case MPOL_PREFERRED:
		nodes_clear(nodes);
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;

	default:
		return -EINVAL;
	}

	l = strlen(policy_modes[mode]);
	if (buffer + maxlen < p + l + 1)
		return -ENOSPC;

	strcpy(p, policy_modes[mode]);
	p += l;

	if (flags & MPOL_MODE_FLAGS) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = '=';

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes)) {
		if (buffer + maxlen < p + 2)
			return -ENOSPC;
		*p++ = ':';
		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
	}
	return p - buffer;
}