/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about the node(s) on which
 * memory should be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a per-process
 *		counter is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non-default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied to
 * memory allocations backed by a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * The same applies to GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
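
/*
 * Illustrative userspace sketch: the policies above are normally requested
 * through the set_mempolicy(2) and mbind(2) system calls, shown here via the
 * libnuma <numaif.h> wrappers.  The node numbers, addr and len are made-up
 * example values; see get_nodes() below for how the (nodemask, maxnode)
 * pair is parsed.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// interleave this task's future allocations over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// bind an existing mapping to node 0, moving already-faulted pages
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_MOVE);
 */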

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

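/*
 * Return @p's task mempolicy if it has one.  Otherwise fall back to the
 * preferred_node_policy entry for the local node, or NULL when that table
 * has not been initialised yet (early in boot).
 */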
static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps.  The
	 * first step sets all the new nodes, and the second step removes all
	 * the disallowed nodes.  This way we avoid a window in which no node
	 * is available for allocation.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the new nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
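
/*
 * Illustrative example (assuming the usual nodes_fold()/nodes_onto()
 * semantics): with a relative policy the user's node numbers are treated as
 * positions within the currently allowed set.  E.g. for *orig = {0,2} and
 * *rel = {4,5,6}, nodes_fold() keeps {0,2} (both below the weight of 3) and
 * nodes_onto() maps position 0 -> node 4 and position 2 -> node 6, so
 * *ret = {4,6}.
 */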

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask() is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding the task's alloc_lock to protect the task's
 * mems_allowed and mempolicy.  May also be called holding the mmap
 * semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization.  You must invoke mpol_set_nodemask() to set the nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps.  The first step
 * sets all the new nodes, and the second step removes all the disallowed
 * nodes.  This way we avoid a window in which no node is available for
 * allocation.
 * If we have a lock to protect task->mempolicy on the read side, we rebind
 * directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and updates the task's mempolicy.
 *
 * Called with the task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to the new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};
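
/*
 * Note: mpol_set_nodemask() and mpol_rebind_policy() dispatch through this
 * table; e.g. an MPOL_BIND policy reaches mpol_new_bind() via
 * mpol_ops[pol->mode].create() and mpol_rebind_nodemask() via
 * mpol_ops[pol->mode].rebind().
 */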

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages, checking whether each one satisfies the node rules. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;

	spin_lock(&vma->vm_mm->page_table_lock);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(&vma->vm_mm->page_table_lock);
#else
	BUG();
#endif
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			check_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault.  Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE.  If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;
	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new;	/* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

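/*
 * Return the node id of the page currently backing @addr in @mm, or a
 * negative errno if the page could not be looked up.
 */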
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from, to, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory off of that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out, with that pair.
	 * Otherwise when we finish scanning 'tmp', we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
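	/*
	 * Illustrative walk-through (hypothetical values): with from = {0,1}
	 * and to = {1,2}, the first scan settles on the pair 1 -> 2 because
	 * node 2 is not in tmp, so node 1 is vacated first; the next
	 * iteration then moves node 0's pages onto the now-vacated node 1.
	 */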

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However, if the number of source nodes is not equal
			 * to the number of destination nodes we cannot
			 * preserve this node relative relationship.  In that
			 * case, skip copying memory from a node that is in
			 * the destination mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page))
		return alloc_huge_page_noerr(vma, address, 1);
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	vma = check_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);	/* maybe ... */
	if (!IS_ERR(vma))
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_vma_page,
					(unsigned long)vma,
					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_lru_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

Christoph Lameter39743882006-01-08 01:00:51 -08001321/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001322 * User space interface with variable sized bitmaps for nodelists.
1323 */
1324
1325/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001326static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001327 unsigned long maxnode)
1328{
1329 unsigned long k;
1330 unsigned long nlongs;
1331 unsigned long endmask;
1332
1333 --maxnode;
1334 nodes_clear(*nodes);
1335 if (maxnode == 0 || !nmask)
1336 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001337 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001338 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001339
1340 nlongs = BITS_TO_LONGS(maxnode);
1341 if ((maxnode % BITS_PER_LONG) == 0)
1342 endmask = ~0UL;
1343 else
1344 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1345
1346 /* When the user specified more nodes than supported, just check
1347 that the unsupported part is all zero. */
1348 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1349 if (nlongs > PAGE_SIZE/sizeof(long))
1350 return -EINVAL;
1351 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1352 unsigned long t;
1353 if (get_user(t, nmask + k))
1354 return -EFAULT;
1355 if (k == nlongs - 1) {
1356 if (t & endmask)
1357 return -EINVAL;
1358 } else if (t)
1359 return -EINVAL;
1360 }
1361 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1362 endmask = ~0UL;
1363 }
1364
1365 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1366 return -EFAULT;
1367 nodes_addr(*nodes)[nlongs-1] &= endmask;
1368 return 0;
1369}
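/*
 * Worked example (illustration only, not part of the original source):
 * for a kernel built with MAX_NUMNODES == 64 and a caller passing
 * maxnode == 4, get_nodes() above computes
 *
 *	maxnode = 3			(after the --maxnode)
 *	nlongs  = BITS_TO_LONGS(3) = 1
 *	endmask = (1UL << 3) - 1 = 0x7
 *
 * so only bits 0-2 of the first user long are honoured; any higher bits
 * are cleared by the final "&= endmask".
 */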
1370
1371/* Copy a kernel node mask to user space */
1372static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1373 nodemask_t *nodes)
1374{
1375 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1376 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1377
1378 if (copy > nbytes) {
1379 if (copy > PAGE_SIZE)
1380 return -EINVAL;
1381 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1382 return -EFAULT;
1383 copy = nbytes;
1384 }
1385 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1386}
1387
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001388SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1389 unsigned long, mode, unsigned long __user *, nmask,
1390 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001391{
1392 nodemask_t nodes;
1393 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001394 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001395
David Rientjes028fec42008-04-28 02:12:25 -07001396 mode_flags = mode & MPOL_MODE_FLAGS;
1397 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001398 if (mode >= MPOL_MAX)
1399 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001400 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1401 (mode_flags & MPOL_F_RELATIVE_NODES))
1402 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001403 err = get_nodes(&nodes, nmask, maxnode);
1404 if (err)
1405 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001406 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001407}
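/*
 * Hedged user-space sketch (illustration only, not part of this file):
 * one plausible way to exercise the mbind() entry point above is the
 * libnuma wrapper declared in <numaif.h>, binding an anonymous mapping
 * to node 0.  The mapping size and node number are arbitrary.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long mask = 1UL << 0;			// node 0
 *	mbind(p, 1 << 20, MPOL_BIND, &mask, 2, MPOL_MF_STRICT);
 */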
1408
1409/* Set the process memory policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001410SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1411 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001412{
1413 int err;
1414 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001415 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001416
David Rientjes028fec42008-04-28 02:12:25 -07001417 flags = mode & MPOL_MODE_FLAGS;
1418 mode &= ~MPOL_MODE_FLAGS;
1419 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001420 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001421 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1422 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001423 err = get_nodes(&nodes, nmask, maxnode);
1424 if (err)
1425 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001426 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001427}
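/*
 * Hedged user-space sketch (illustration only): restricting the calling
 * task to interleave its allocations across nodes 0 and 1.  maxnode == 3
 * makes get_nodes() above honour two bits of the mask.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 3);
 */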
1428
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001429SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1430 const unsigned long __user *, old_nodes,
1431 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001432{
David Howellsc69e8d92008-11-14 10:39:19 +11001433 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001434 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001435 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001436 nodemask_t task_nodes;
1437 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001438 nodemask_t *old;
1439 nodemask_t *new;
1440 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001441
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001442 if (!scratch)
1443 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001444
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001445 old = &scratch->mask1;
1446 new = &scratch->mask2;
1447
1448 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001449 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001450 goto out;
1451
1452 err = get_nodes(new, new_nodes, maxnode);
1453 if (err)
1454 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001455
1456 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001457 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001458 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001459 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001460 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001461 err = -ESRCH;
1462 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001463 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001464 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001465
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001466 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001467
1468 /*
1469 * Check if this process has the right to modify the specified
1470 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001471 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001472 * userid as the target process.
1473 */
David Howellsc69e8d92008-11-14 10:39:19 +11001474 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001475 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1476 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001477 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001478 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001479 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001480 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001481 }
David Howellsc69e8d92008-11-14 10:39:19 +11001482 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001483
1484 task_nodes = cpuset_mems_allowed(task);
1485 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001486 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001487 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001488 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001489 }
1490
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08001491 if (!nodes_subset(*new, node_states[N_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001492 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001493 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001494 }
1495
David Quigley86c3a762006-06-23 02:04:02 -07001496 err = security_task_movememory(task);
1497 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001498 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001499
Christoph Lameter3268c632012-03-21 16:34:06 -07001500 mm = get_task_mm(task);
1501 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001502
1503 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001504 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001505 goto out;
1506 }
1507
1508 err = do_migrate_pages(mm, old, new,
1509 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001510
1511 mmput(mm);
1512out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001513 NODEMASK_SCRATCH_FREE(scratch);
1514
Christoph Lameter39743882006-01-08 01:00:51 -08001515 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001516
1517out_put:
1518 put_task_struct(task);
1519 goto out;
1520
Christoph Lameter39743882006-01-08 01:00:51 -08001521}
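/*
 * Hedged user-space sketch (illustration only): moving another task's
 * pages from node 0 to node 1, subject to the permission checks above.
 * "target_pid" is a hypothetical pid; maxnode == 3 covers node bits 0-1
 * in both masks.
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	long ret = migrate_pages(target_pid, 3, &from, &to);
 */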
1522
1523
Christoph Lameter8bccd852005-10-29 18:16:59 -07001524/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001525SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1526 unsigned long __user *, nmask, unsigned long, maxnode,
1527 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001528{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001529 int err;
1530 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001531 nodemask_t nodes;
1532
1533 if (nmask != NULL && maxnode < MAX_NUMNODES)
1534 return -EINVAL;
1535
1536 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1537
1538 if (err)
1539 return err;
1540
1541 if (policy && put_user(pval, policy))
1542 return -EFAULT;
1543
1544 if (nmask)
1545 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1546
1547 return err;
1548}
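/*
 * Hedged user-space sketch (illustration only): querying the policy in
 * effect at a particular address of the calling task.  "addr" is a
 * hypothetical pointer; the mask is sized generously so that maxnode is
 * at least MAX_NUMNODES on common configurations, as required above.
 *
 *	#include <numaif.h>
 *
 *	int mode;
 *	unsigned long mask[16];			// 1024 node bits on 64-bit
 *	get_mempolicy(&mode, mask, 8 * sizeof(mask), addr, MPOL_F_ADDR);
 */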
1549
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550#ifdef CONFIG_COMPAT
1551
1552asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1553 compat_ulong_t __user *nmask,
1554 compat_ulong_t maxnode,
1555 compat_ulong_t addr, compat_ulong_t flags)
1556{
1557 long err;
1558 unsigned long __user *nm = NULL;
1559 unsigned long nr_bits, alloc_size;
1560 DECLARE_BITMAP(bm, MAX_NUMNODES);
1561
1562 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1563 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1564
1565 if (nmask)
1566 nm = compat_alloc_user_space(alloc_size);
1567
1568 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1569
1570 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001571 unsigned long copy_size;
1572 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1573 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 /* ensure entire bitmap is zeroed */
1575 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1576 err |= compat_put_bitmap(nmask, bm, nr_bits);
1577 }
1578
1579 return err;
1580}
1581
1582asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1583 compat_ulong_t maxnode)
1584{
1585 long err = 0;
1586 unsigned long __user *nm = NULL;
1587 unsigned long nr_bits, alloc_size;
1588 DECLARE_BITMAP(bm, MAX_NUMNODES);
1589
1590 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1591 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1592
1593 if (nmask) {
1594 err = compat_get_bitmap(bm, nmask, nr_bits);
1595 nm = compat_alloc_user_space(alloc_size);
1596 err |= copy_to_user(nm, bm, alloc_size);
1597 }
1598
1599 if (err)
1600 return -EFAULT;
1601
1602 return sys_set_mempolicy(mode, nm, nr_bits+1);
1603}
1604
1605asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1606 compat_ulong_t mode, compat_ulong_t __user *nmask,
1607 compat_ulong_t maxnode, compat_ulong_t flags)
1608{
1609 long err = 0;
1610 unsigned long __user *nm = NULL;
1611 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001612 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
1614 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1615 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1616
1617 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001618 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001620 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 }
1622
1623 if (err)
1624 return -EFAULT;
1625
1626 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1627}
1628
1629#endif
1630
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001631/*
1632 * get_vma_policy(@task, @vma, @addr)
1633 * @task - task for fallback if vma policy == default
1634 * @vma - virtual memory area whose policy is sought
1635 * @addr - address in @vma for shared policy lookup
1636 *
1637 * Returns effective policy for a VMA at specified address.
1638 * Falls back to @task or system default policy, as necessary.
David Rientjes32f85162012-10-16 17:31:23 -07001639 * Current or other task's task mempolicy and non-shared vma policies must be
1640 * protected by task_lock(task) by the caller.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001641 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1642 * count--added by the get_policy() vm_op, as appropriate--to protect against
1643 * freeing by another task. It is the caller's responsibility to free the
1644 * extra reference for shared policies.
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001645 */
Stephen Wilsond98f6cb2011-05-24 17:12:41 -07001646struct mempolicy *get_vma_policy(struct task_struct *task,
Christoph Lameter48fce342006-01-08 01:01:03 -08001647 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648{
Mel Gorman5606e382012-11-02 18:19:13 +00001649 struct mempolicy *pol = get_task_policy(task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
1651 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001652 if (vma->vm_ops && vma->vm_ops->get_policy) {
Lee Schermerhornae4d8c12008-04-28 02:13:11 -07001653 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1654 addr);
1655 if (vpol)
1656 pol = vpol;
Mel Gorman00442ad2012-10-08 16:29:20 -07001657 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001659
1660 /*
1661 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1662 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1663 * count on these policies which will be dropped by
1664 * mpol_cond_put() later
1665 */
1666 if (mpol_needs_cond_ref(pol))
1667 mpol_get(pol);
1668 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 }
1670 if (!pol)
1671 pol = &default_policy;
1672 return pol;
1673}
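/*
 * Sketch of the expected calling pattern (for illustration; in-tree
 * callers such as alloc_pages_vma() below are the authoritative users):
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... consult pol ...
 *	mpol_cond_put(pol);	// drops the ref only for MPOL_F_SHARED policies
 */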
1674
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001675static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1676{
1677 enum zone_type dynamic_policy_zone = policy_zone;
1678
1679 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1680
1681 /*
1682 * If policy->v.nodes has movable memory only,
1683 * we apply the policy only when gfp_zone(gfp) is ZONE_MOVABLE.
1684 *
1685 * policy->v.nodes intersects with node_states[N_MEMORY],
1686 * so if the following test fails, it implies that
1687 * policy->v.nodes has movable memory only.
1688 */
1689 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1690 dynamic_policy_zone = ZONE_MOVABLE;
1691
1692 return zone >= dynamic_policy_zone;
1693}
1694
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001695/*
1696 * Return a nodemask representing a mempolicy for filtering nodes for
1697 * page allocation
1698 */
1699static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001700{
1701 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001702 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001703 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001704 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1705 return &policy->v.nodes;
1706
1707 return NULL;
1708}
1709
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001710/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001711static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1712 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001714 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001716 if (!(policy->flags & MPOL_F_LOCAL))
1717 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 break;
1719 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001720 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001721 * Normally, MPOL_BIND allocations are node-local within the
1722 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001723 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001724 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001725 */
Mel Gorman19770b32008-04-28 02:12:18 -07001726 if (unlikely(gfp & __GFP_THISNODE) &&
1727 unlikely(!node_isset(nd, policy->v.nodes)))
1728 nd = first_node(policy->v.nodes);
1729 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 BUG();
1732 }
Mel Gorman0e884602008-04-28 02:12:14 -07001733 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734}
1735
1736/* Do dynamic interleaving for a process */
1737static unsigned interleave_nodes(struct mempolicy *policy)
1738{
1739 unsigned nid, next;
1740 struct task_struct *me = current;
1741
1742 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001743 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001745 next = first_node(policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001746 if (next < MAX_NUMNODES)
1747 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 return nid;
1749}
1750
Christoph Lameterdc85da12006-01-18 17:42:36 -08001751/*
1752 * Depending on the memory policy provide a node from which to allocate the
1753 * next slab entry.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001754 * @policy must be protected from freeing by the caller. If @policy is
1755 * the current task's mempolicy, this protection is implicit, as only the
1756 * task can change its policy. The system default policy requires no
1757 * such protection.
Christoph Lameterdc85da12006-01-18 17:42:36 -08001758 */
Andi Kleene7b691b2012-06-09 02:40:03 -07001759unsigned slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001760{
Andi Kleene7b691b2012-06-09 02:40:03 -07001761 struct mempolicy *policy;
1762
1763 if (in_interrupt())
1764 return numa_node_id();
1765
1766 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001767 if (!policy || policy->flags & MPOL_F_LOCAL)
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001768 return numa_node_id();
Christoph Lameter765c4502006-09-27 01:50:08 -07001769
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001770 switch (policy->mode) {
1771 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001772 /*
1773 * handled MPOL_F_LOCAL above
1774 */
1775 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001776
Christoph Lameterdc85da12006-01-18 17:42:36 -08001777 case MPOL_INTERLEAVE:
1778 return interleave_nodes(policy);
1779
Mel Gormandd1a2392008-04-28 02:12:17 -07001780 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001781 /*
1782 * Follow bind policy behavior and start allocation at the
1783 * first node.
1784 */
Mel Gorman19770b32008-04-28 02:12:18 -07001785 struct zonelist *zonelist;
1786 struct zone *zone;
1787 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1788 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1789 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1790 &policy->v.nodes,
1791 &zone);
Eric Dumazet800416f2010-10-27 19:33:43 +02001792 return zone ? zone->node : numa_node_id();
Mel Gormandd1a2392008-04-28 02:12:17 -07001793 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001794
Christoph Lameterdc85da12006-01-18 17:42:36 -08001795 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001796 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001797 }
1798}
1799
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800/* Do static interleaving for a VMA with known offset. */
1801static unsigned offset_il_node(struct mempolicy *pol,
1802 struct vm_area_struct *vma, unsigned long off)
1803{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001804 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001805 unsigned target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 int c;
1807 int nid = -1;
1808
David Rientjesf5b087b2008-04-28 02:12:27 -07001809 if (!nnodes)
1810 return numa_node_id();
1811 target = (unsigned int)off % nnodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 c = 0;
1813 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001814 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 c++;
1816 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 return nid;
1818}
1819
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001820/* Determine a node number for interleave */
1821static inline unsigned interleave_nid(struct mempolicy *pol,
1822 struct vm_area_struct *vma, unsigned long addr, int shift)
1823{
1824 if (vma) {
1825 unsigned long off;
1826
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001827 /*
1828 * for small pages, there is no difference between
1829 * shift and PAGE_SHIFT, so the bit-shift is safe.
1830 * for huge pages, since vm_pgoff is in units of small
1831 * pages, we need to shift off the always 0 bits to get
1832 * a useful offset.
1833 */
1834 BUG_ON(shift < PAGE_SHIFT);
1835 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001836 off += (addr - vma->vm_start) >> shift;
1837 return offset_il_node(pol, vma, off);
1838 } else
1839 return interleave_nodes(pol);
1840}
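/*
 * Worked example (hypothetical numbers): with an interleave mask of
 * nodes {0,2,3} (nnodes = 3), a small-page VMA whose vm_pgoff is 0 and a
 * fault at vma->vm_start + 5 * PAGE_SIZE, interleave_nid() computes
 * off = 5, offset_il_node() picks target = 5 % 3 = 2 and walks the mask
 * 0 -> 2 -> 3, so the page is allocated on node 3.
 */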
1841
Michal Hocko778d3b02011-07-26 16:08:30 -07001842/*
1843 * Return the bit number of a random bit set in the nodemask.
1844 * (returns -1 if nodemask is empty)
1845 */
1846int node_random(const nodemask_t *maskp)
1847{
1848 int w, bit = -1;
1849
1850 w = nodes_weight(*maskp);
1851 if (w)
1852 bit = bitmap_ord_to_pos(maskp->bits,
1853 get_random_int() % w, MAX_NUMNODES);
1854 return bit;
1855}
1856
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001857#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001858/*
1859 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1860 * @vma = virtual memory area whose policy is sought
1861 * @addr = address in @vma for shared policy lookup and interleave policy
1862 * @gfp_flags = for requested zone
Mel Gorman19770b32008-04-28 02:12:18 -07001863 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1864 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001865 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001866 * Returns a zonelist suitable for a huge page allocation and a pointer
1867 * to the struct mempolicy for conditional unref after allocation.
1868 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1869 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001870 *
1871 * Must be protected by get_mems_allowed()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001872 */
Mel Gorman396faf02007-07-17 04:03:13 -07001873struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001874 gfp_t gfp_flags, struct mempolicy **mpol,
1875 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001876{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001877 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001878
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001879 *mpol = get_vma_policy(current, vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001880 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001881
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001882 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1883 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001884 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001885 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001886 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001887 if ((*mpol)->mode == MPOL_BIND)
1888 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001889 }
1890 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001891}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001892
1893/*
1894 * init_nodemask_of_mempolicy
1895 *
1896 * If the current task's mempolicy is "default" [NULL], return 'false'
1897 * to indicate default policy. Otherwise, extract the policy nodemask
1898 * for 'bind' or 'interleave' policy into the argument nodemask, or
1899 * initialize the argument nodemask to contain the single node for
1900 * 'preferred' or 'local' policy and return 'true' to indicate presence
1901 * of non-default mempolicy.
1902 *
1903 * We don't bother with reference counting the mempolicy [mpol_get/put]
1904 * because the current task is examining it's own mempolicy and a task's
1905 * mempolicy is only ever changed by the task itself.
1906 *
1907 * N.B., it is the caller's responsibility to free a returned nodemask.
1908 */
1909bool init_nodemask_of_mempolicy(nodemask_t *mask)
1910{
1911 struct mempolicy *mempolicy;
1912 int nid;
1913
1914 if (!(mask && current->mempolicy))
1915 return false;
1916
Miao Xiec0ff7452010-05-24 14:32:08 -07001917 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001918 mempolicy = current->mempolicy;
1919 switch (mempolicy->mode) {
1920 case MPOL_PREFERRED:
1921 if (mempolicy->flags & MPOL_F_LOCAL)
1922 nid = numa_node_id();
1923 else
1924 nid = mempolicy->v.preferred_node;
1925 init_nodemask_of_node(mask, nid);
1926 break;
1927
1928 case MPOL_BIND:
1929 /* Fall through */
1930 case MPOL_INTERLEAVE:
1931 *mask = mempolicy->v.nodes;
1932 break;
1933
1934 default:
1935 BUG();
1936 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001937 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001938
1939 return true;
1940}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001941#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001942
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001943/*
1944 * mempolicy_nodemask_intersects
1945 *
1946 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1947 * policy. Otherwise, check for intersection between mask and the policy
1948 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1949 * policy, always return true since it may allocate elsewhere on fallback.
1950 *
1951 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1952 */
1953bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1954 const nodemask_t *mask)
1955{
1956 struct mempolicy *mempolicy;
1957 bool ret = true;
1958
1959 if (!mask)
1960 return ret;
1961 task_lock(tsk);
1962 mempolicy = tsk->mempolicy;
1963 if (!mempolicy)
1964 goto out;
1965
1966 switch (mempolicy->mode) {
1967 case MPOL_PREFERRED:
1968 /*
1969 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1970 * allocate from, they may fallback to other nodes when oom.
1971 * Thus, it's possible for tsk to have allocated memory from
1972 * nodes in mask.
1973 */
1974 break;
1975 case MPOL_BIND:
1976 case MPOL_INTERLEAVE:
1977 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1978 break;
1979 default:
1980 BUG();
1981 }
1982out:
1983 task_unlock(tsk);
1984 return ret;
1985}
1986
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987/* Allocate a page in interleaved policy.
1988 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001989static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1990 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991{
1992 struct zonelist *zl;
1993 struct page *page;
1994
Mel Gorman0e884602008-04-28 02:12:14 -07001995 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001997 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001998 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 return page;
2000}
2001
2002/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002003 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 *
2005 * @gfp:
2006 * %GFP_USER user allocation.
2007 * %GFP_KERNEL kernel allocations,
2008 * %GFP_HIGHMEM highmem/user allocations,
2009 * %GFP_FS allocation should not call back into a file system.
2010 * %GFP_ATOMIC don't sleep.
2011 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002012 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 * @vma: Pointer to VMA or NULL if not available.
2014 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2015 *
2016 * This function allocates a page from the kernel page pool and applies
2017 * a NUMA policy associated with the VMA or the current process.
2018 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2019 * mm_struct of the VMA to prevent it from going away. Should be used for
2020 * all allocations for pages that will be mapped into
2021 * user space. Returns NULL when no page can be allocated.
2022 *
2023 * Should be called with the mmap_sem of the vma held.
2024 */
2025struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002026alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Andi Kleen2f5f9482011-03-04 17:36:29 -08002027 unsigned long addr, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002029 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002030 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002031 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
Mel Gormancc9a6c82012-03-21 16:34:11 -07002033retry_cpuset:
2034 pol = get_vma_policy(current, vma, addr);
2035 cpuset_mems_cookie = get_mems_allowed();
2036
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002037 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002039
Andi Kleen8eac5632011-02-25 14:44:28 -08002040 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002041 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002042 page = alloc_page_interleave(gfp, order, nid);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002043 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2044 goto retry_cpuset;
2045
Miao Xiec0ff7452010-05-24 14:32:08 -07002046 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 }
David Rientjes212a0a62012-12-11 16:02:51 -08002048 page = __alloc_pages_nodemask(gfp, order,
2049 policy_zonelist(gfp, pol, node),
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002050 policy_nodemask(gfp, pol));
David Rientjes212a0a62012-12-11 16:02:51 -08002051 if (unlikely(mpol_needs_cond_ref(pol)))
2052 __mpol_put(pol);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002053 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2054 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002055 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056}
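/*
 * Hedged sketch of a typical fault-path call (illustration only; real
 * callers usually go through the alloc_page_vma() wrapper):
 *
 *	struct page *page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0,
 *					    vma, address, numa_node_id());
 *	if (!page)
 *		return VM_FAULT_OOM;
 */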
2057
2058/**
2059 * alloc_pages_current - Allocate pages.
2060 *
2061 * @gfp:
2062 * %GFP_USER user allocation,
2063 * %GFP_KERNEL kernel allocation,
2064 * %GFP_HIGHMEM highmem allocation,
2065 * %GFP_FS don't call back into a file system.
2066 * %GFP_ATOMIC don't sleep.
2067 * @order: Power of two of allocation size in pages. 0 is a single page.
2068 *
2069 * Allocate a page from the kernel page pool. When not in
2070 * interrupt context, apply the current process' NUMA policy.
2071 * Returns NULL when no page can be allocated.
2072 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002073 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 * 1) it's ok to take cpuset_sem (can WAIT), and
2075 * 2) allocating for current task (not interrupt).
2076 */
Al Virodd0fc662005-10-07 07:46:04 +01002077struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078{
Mel Gorman5606e382012-11-02 18:19:13 +00002079 struct mempolicy *pol = get_task_policy(current);
Miao Xiec0ff7452010-05-24 14:32:08 -07002080 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002081 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
Christoph Lameter9b819d22006-09-25 23:31:40 -07002083 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 pol = &default_policy;
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002085
Mel Gormancc9a6c82012-03-21 16:34:11 -07002086retry_cpuset:
2087 cpuset_mems_cookie = get_mems_allowed();
2088
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002089 /*
2090 * No reference counting needed for current->mempolicy
2091 * nor system default_policy
2092 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002093 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002094 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2095 else
2096 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002097 policy_zonelist(gfp, pol, numa_node_id()),
2098 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002099
2100 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2101 goto retry_cpuset;
2102
Miao Xiec0ff7452010-05-24 14:32:08 -07002103 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104}
2105EXPORT_SYMBOL(alloc_pages_current);
2106
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002107int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2108{
2109 struct mempolicy *pol = mpol_dup(vma_policy(src));
2110
2111 if (IS_ERR(pol))
2112 return PTR_ERR(pol);
2113 dst->vm_policy = pol;
2114 return 0;
2115}
2116
Paul Jackson42253992006-01-08 01:01:59 -08002117/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002118 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002119 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2120 * with the mems_allowed returned by cpuset_mems_allowed(). This
2121 * keeps mempolicies cpuset relative after its cpuset moves. See
2122 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002123 *
2124 * current's mempolicy may be rebound by another task (the task that changes
2125 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002126 */
Paul Jackson42253992006-01-08 01:01:59 -08002127
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002128/* Slow path of a mempolicy duplicate */
2129struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130{
2131 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2132
2133 if (!new)
2134 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002135
2136 /* task's mempolicy is protected by alloc_lock */
2137 if (old == current->mempolicy) {
2138 task_lock(current);
2139 *new = *old;
2140 task_unlock(current);
2141 } else
2142 *new = *old;
2143
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002144 rcu_read_lock();
Paul Jackson42253992006-01-08 01:01:59 -08002145 if (current_cpuset_is_being_rebound()) {
2146 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002147 if (new->flags & MPOL_F_REBINDING)
2148 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2149 else
2150 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002151 }
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002152 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 return new;
2155}
2156
2157/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002158bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159{
2160 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002161 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002162 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002163 return false;
Bob Liu19800502010-05-24 14:32:01 -07002164 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002165 return false;
Bob Liu19800502010-05-24 14:32:01 -07002166 if (mpol_store_user_nodemask(a))
2167 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002168 return false;
Bob Liu19800502010-05-24 14:32:01 -07002169
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002170 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002171 case MPOL_BIND:
2172 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002174 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002176 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 default:
2178 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002179 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 }
2181}
2182
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 * Shared memory backing store policy support.
2185 *
2186 * Remember policies even when nobody has shared memory mapped.
2187 * The policies are kept in Red-Black tree linked from the inode.
2188 * They are protected by the sp->lock spinlock, which should be held
2189 * for any accesses to the tree.
2190 */
2191
2192/* lookup first element intersecting start-end */
Mel Gorman42288fe2012-12-21 23:10:25 +00002193/* Caller holds sp->lock */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194static struct sp_node *
2195sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2196{
2197 struct rb_node *n = sp->root.rb_node;
2198
2199 while (n) {
2200 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2201
2202 if (start >= p->end)
2203 n = n->rb_right;
2204 else if (end <= p->start)
2205 n = n->rb_left;
2206 else
2207 break;
2208 }
2209 if (!n)
2210 return NULL;
2211 for (;;) {
2212 struct sp_node *w = NULL;
2213 struct rb_node *prev = rb_prev(n);
2214 if (!prev)
2215 break;
2216 w = rb_entry(prev, struct sp_node, nd);
2217 if (w->end <= start)
2218 break;
2219 n = prev;
2220 }
2221 return rb_entry(n, struct sp_node, nd);
2222}
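/*
 * Worked example (illustration only): if the tree holds the ranges
 * [0,4) and [4,10), a lookup for [3,6) first descends to a node that
 * overlaps the request, then walks backwards and returns [0,4), the
 * leftmost node whose end (4) is greater than the requested start (3),
 * i.e. the first policy that intersects the range.
 */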
2223
2224/* Insert a new shared policy into the list. */
2225/* Caller holds sp->lock */
2226static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2227{
2228 struct rb_node **p = &sp->root.rb_node;
2229 struct rb_node *parent = NULL;
2230 struct sp_node *nd;
2231
2232 while (*p) {
2233 parent = *p;
2234 nd = rb_entry(parent, struct sp_node, nd);
2235 if (new->start < nd->start)
2236 p = &(*p)->rb_left;
2237 else if (new->end > nd->end)
2238 p = &(*p)->rb_right;
2239 else
2240 BUG();
2241 }
2242 rb_link_node(&new->nd, parent, p);
2243 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002244 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002245 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247
2248/* Find shared policy intersecting idx */
2249struct mempolicy *
2250mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2251{
2252 struct mempolicy *pol = NULL;
2253 struct sp_node *sn;
2254
2255 if (!sp->root.rb_node)
2256 return NULL;
Mel Gorman42288fe2012-12-21 23:10:25 +00002257 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 sn = sp_lookup(sp, idx, idx+1);
2259 if (sn) {
2260 mpol_get(sn->policy);
2261 pol = sn->policy;
2262 }
Mel Gorman42288fe2012-12-21 23:10:25 +00002263 spin_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 return pol;
2265}
2266
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002267static void sp_free(struct sp_node *n)
2268{
2269 mpol_put(n->policy);
2270 kmem_cache_free(sn_cache, n);
2271}
2272
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002273/**
2274 * mpol_misplaced - check whether current page node is valid in policy
2275 *
2276 * @page - page to be checked
2277 * @vma - vm area where page mapped
2278 * @addr - virtual address where page mapped
2279 *
2280 * Lookup current policy node id for vma,addr and "compare to" page's
2281 * node id.
2282 *
2283 * Returns:
2284 * -1 - not misplaced, page is in the right node
2285 * node - node id where the page should be
2286 *
2287 * Policy determination "mimics" alloc_page_vma().
2288 * Called from fault path where we know the vma and faulting address.
2289 */
2290int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2291{
2292 struct mempolicy *pol;
2293 struct zone *zone;
2294 int curnid = page_to_nid(page);
2295 unsigned long pgoff;
2296 int polnid = -1;
2297 int ret = -1;
2298
2299 BUG_ON(!vma);
2300
2301 pol = get_vma_policy(current, vma, addr);
2302 if (!(pol->flags & MPOL_F_MOF))
2303 goto out;
2304
2305 switch (pol->mode) {
2306 case MPOL_INTERLEAVE:
2307 BUG_ON(addr >= vma->vm_end);
2308 BUG_ON(addr < vma->vm_start);
2309
2310 pgoff = vma->vm_pgoff;
2311 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2312 polnid = offset_il_node(pol, vma, pgoff);
2313 break;
2314
2315 case MPOL_PREFERRED:
2316 if (pol->flags & MPOL_F_LOCAL)
2317 polnid = numa_node_id();
2318 else
2319 polnid = pol->v.preferred_node;
2320 break;
2321
2322 case MPOL_BIND:
2323 /*
2324 * allows binding to multiple nodes.
2325 * use current page if in policy nodemask,
2326 * else select nearest allowed node, if any.
2327 * If no allowed nodes, use current [!misplaced].
2328 */
2329 if (node_isset(curnid, pol->v.nodes))
2330 goto out;
2331 (void)first_zones_zonelist(
2332 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2333 gfp_zone(GFP_HIGHUSER),
2334 &pol->v.nodes, &zone);
2335 polnid = zone->node;
2336 break;
2337
2338 default:
2339 BUG();
2340 }
Mel Gorman5606e382012-11-02 18:19:13 +00002341
2342 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002343 if (pol->flags & MPOL_F_MORON) {
2344 int last_nid;
2345
Mel Gorman5606e382012-11-02 18:19:13 +00002346 polnid = numa_node_id();
2347
Mel Gormane42c8ff2012-11-12 09:17:07 +00002348 /*
2349 * Multi-stage node selection is used in conjunction
2350 * with a periodic migration fault to build a temporal
2351 * task<->page relation. By using a two-stage filter we
2352 * remove short/unlikely relations.
2353 *
2354 * Using P(p) ~ n_p / n_t as per frequentist
2355 * probability, we can equate a task's usage of a
2356 * particular page (n_p) per total usage of this
2357 * page (n_t) (in a given time-span) to a probability.
2358 *
2359 * Our periodic faults will sample this probability and
2360 * getting the same result twice in a row, given these
2361 * samples are fully independent, is then given by
2362 * P(n)^2, provided our sample period is sufficiently
2363 * short compared to the usage pattern.
2364 *
2365 * This quadratic squishes small probabilities, making
2366 * it less likely we act on an unlikely task<->page
2367 * relation.
2368 */
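		/*
		 * Illustrative numbers (not from the original source): a page
		 * this task touches in roughly 10% of the sampling windows has
		 * P(p) ~ 0.1, so two consecutive faults select it with
		 * probability ~0.01 and it is rarely migrated; a page touched
		 * in nearly every window keeps P(p)^2 ~ 1 and migrates quickly.
		 */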
Mel Gorman22b751c2013-02-22 16:34:59 -08002369 last_nid = page_nid_xchg_last(page, polnid);
Mel Gormane42c8ff2012-11-12 09:17:07 +00002370 if (last_nid != polnid)
2371 goto out;
2372 }
2373
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002374 if (curnid != polnid)
2375 ret = polnid;
2376out:
2377 mpol_cond_put(pol);
2378
2379 return ret;
2380}
2381
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2383{
Paul Mundt140d5a42007-07-15 23:38:16 -07002384	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002386 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387}
2388
Mel Gorman42288fe2012-12-21 23:10:25 +00002389static void sp_node_init(struct sp_node *node, unsigned long start,
2390 unsigned long end, struct mempolicy *pol)
2391{
2392 node->start = start;
2393 node->end = end;
2394 node->policy = pol;
2395}
2396
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002397static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2398 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002400 struct sp_node *n;
2401 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002403 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 if (!n)
2405 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002406
2407 newpol = mpol_dup(pol);
2408 if (IS_ERR(newpol)) {
2409 kmem_cache_free(sn_cache, n);
2410 return NULL;
2411 }
2412 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002413 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002414
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 return n;
2416}
2417
2418/* Replace a policy range. */
2419static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2420 unsigned long end, struct sp_node *new)
2421{
Mel Gormanb22d1272012-10-08 16:29:17 -07002422 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002423 struct sp_node *n_new = NULL;
2424 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002425 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426
Mel Gorman42288fe2012-12-21 23:10:25 +00002427restart:
2428 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 n = sp_lookup(sp, start, end);
2430 /* Take care of old policies in the same range. */
2431 while (n && n->start < end) {
2432 struct rb_node *next = rb_next(&n->nd);
2433 if (n->start >= start) {
2434 if (n->end <= end)
2435 sp_delete(sp, n);
2436 else
2437 n->start = end;
2438 } else {
2439 /* Old policy spanning whole new range. */
2440 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002441 if (!n_new)
2442 goto alloc_new;
2443
2444 *mpol_new = *n->policy;
2445 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002446 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002448 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002449 n_new = NULL;
2450 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 break;
2452 } else
2453 n->end = start;
2454 }
2455 if (!next)
2456 break;
2457 n = rb_entry(next, struct sp_node, nd);
2458 }
2459 if (new)
2460 sp_insert(sp, new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002461 spin_unlock(&sp->lock);
2462 ret = 0;
2463
2464err_out:
2465 if (mpol_new)
2466 mpol_put(mpol_new);
2467 if (n_new)
2468 kmem_cache_free(sn_cache, n_new);
2469
Mel Gormanb22d1272012-10-08 16:29:17 -07002470 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002471
2472alloc_new:
2473 spin_unlock(&sp->lock);
2474 ret = -ENOMEM;
2475 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2476 if (!n_new)
2477 goto err_out;
2478 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2479 if (!mpol_new)
2480 goto err_out;
2481 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482}
2483
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002484/**
2485 * mpol_shared_policy_init - initialize shared policy for inode
2486 * @sp: pointer to inode shared policy
2487 * @mpol: struct mempolicy to install
2488 *
2489 * Install non-NULL @mpol in inode's shared policy rb-tree.
2490 * On entry, the current task has a reference on a non-NULL @mpol.
2491 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002492 * This is called during get_inode() calls, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002493 */
2494void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002495{
Miao Xie58568d22009-06-16 15:31:49 -07002496 int ret;
2497
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002498 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Mel Gorman42288fe2012-12-21 23:10:25 +00002499 spin_lock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002500
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002501 if (mpol) {
2502 struct vm_area_struct pvma;
2503 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002504 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002505
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002506 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002507 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002508 /* contextualize the tmpfs mount point mempolicy */
2509 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002510 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002511 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002512
2513 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002514 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002515 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002516 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002517 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002518
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002519 /* Create pseudo-vma that contains just the policy */
2520 memset(&pvma, 0, sizeof(struct vm_area_struct));
2521 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2522 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002523
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002524put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002525 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002526free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002527 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002528put_mpol:
2529 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002530 }
2531}
2532
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533int mpol_set_shared_policy(struct shared_policy *info,
2534 struct vm_area_struct *vma, struct mempolicy *npol)
2535{
2536 int err;
2537 struct sp_node *new = NULL;
2538 unsigned long sz = vma_pages(vma);
2539
David Rientjes028fec42008-04-28 02:12:25 -07002540 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002542 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002543 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002544 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545
2546 if (npol) {
2547 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2548 if (!new)
2549 return -ENOMEM;
2550 }
2551 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2552 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002553 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 return err;
2555}
2556
2557/* Free a backing policy store on inode delete. */
2558void mpol_free_shared_policy(struct shared_policy *p)
2559{
2560 struct sp_node *n;
2561 struct rb_node *next;
2562
2563 if (!p->root.rb_node)
2564 return;
Mel Gorman42288fe2012-12-21 23:10:25 +00002565 spin_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 next = rb_first(&p->root);
2567 while (next) {
2568 n = rb_entry(next, struct sp_node, nd);
2569 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002570 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 }
Mel Gorman42288fe2012-12-21 23:10:25 +00002572 spin_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573}
2574
Mel Gorman1a687c22012-11-22 11:16:36 +00002575#ifdef CONFIG_NUMA_BALANCING
2576static bool __initdata numabalancing_override;
2577
2578static void __init check_numabalancing_enable(void)
2579{
2580 bool numabalancing_default = false;
2581
2582 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2583 numabalancing_default = true;
2584
2585 if (nr_node_ids > 1 && !numabalancing_override) {
2586		printk(KERN_INFO "Enabling automatic NUMA balancing. "
2587			"Configure with numa_balancing= or the kernel.numa_balancing sysctl\n");
2588 set_numabalancing_state(numabalancing_default);
2589 }
2590}
2591
2592static int __init setup_numabalancing(char *str)
2593{
2594 int ret = 0;
2595 if (!str)
2596 goto out;
2597 numabalancing_override = true;
2598
2599 if (!strcmp(str, "enable")) {
2600 set_numabalancing_state(true);
2601 ret = 1;
2602 } else if (!strcmp(str, "disable")) {
2603 set_numabalancing_state(false);
2604 ret = 1;
2605 }
2606out:
2607 if (!ret)
2608 printk(KERN_WARNING "Unable to parse numa_balancing=\n");
2609
2610 return ret;
2611}
2612__setup("numa_balancing=", setup_numabalancing);
2613#else
2614static inline void __init check_numabalancing_enable(void)
2615{
2616}
2617#endif /* CONFIG_NUMA_BALANCING */
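/*
 * Usage example (comment only): automatic NUMA balancing can be switched at
 * boot through the parameter handled by setup_numabalancing() above, or at
 * runtime through the sysctl that set_numabalancing_state() backs, assuming
 * that sysctl is exposed on the running kernel:
 *
 *	numa_balancing=disable			on the kernel command line
 *	sysctl kernel.numa_balancing=1		from userspace at runtime
 */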
2618
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619/* assumes fs == KERNEL_DS */
2620void __init numa_policy_init(void)
2621{
Paul Mundtb71636e2007-07-15 23:38:15 -07002622 nodemask_t interleave_nodes;
2623 unsigned long largest = 0;
2624 int nid, prefer = 0;
2625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 policy_cache = kmem_cache_create("numa_policy",
2627 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002628 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629
2630 sn_cache = kmem_cache_create("shared_policy_node",
2631 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002632 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
Mel Gorman5606e382012-11-02 18:19:13 +00002634 for_each_node(nid) {
2635 preferred_node_policy[nid] = (struct mempolicy) {
2636 .refcnt = ATOMIC_INIT(1),
2637 .mode = MPOL_PREFERRED,
2638 .flags = MPOL_F_MOF | MPOL_F_MORON,
2639 .v = { .preferred_node = nid, },
2640 };
2641 }
2642
Paul Mundtb71636e2007-07-15 23:38:15 -07002643 /*
2644 * Set interleaving policy for system init. Interleaving is only
2645 * enabled across suitably sized nodes (default is >= 16MB), falling
2646 * back to the largest node if they're all smaller.
2647 */
2648 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002649 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002650 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651
Paul Mundtb71636e2007-07-15 23:38:15 -07002652 /* Preserve the largest node */
2653 if (largest < total_pages) {
2654 largest = total_pages;
2655 prefer = nid;
2656 }
2657
2658 /* Interleave this node? */
2659 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2660 node_set(nid, interleave_nodes);
2661 }
2662
2663 /* All too small, use the largest */
2664 if (unlikely(nodes_empty(interleave_nodes)))
2665 node_set(prefer, interleave_nodes);
2666
David Rientjes028fec42008-04-28 02:12:25 -07002667 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 printk(KERN_ERR "numa_policy_init: interleaving failed\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002669
2670 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671}
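/*
 * Worked example of the interleave threshold used above: with 4KB pages
 * (PAGE_SHIFT == 12), "(total_pages << PAGE_SHIFT) >= (16 << 20)" means a
 * node needs at least 16MB / 4KB = 4096 present pages to join the boot-time
 * interleave set.  If every node is below the threshold, the nodemask stays
 * empty and only the single largest node is interleaved over.
 */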
2672
Christoph Lameter8bccd852005-10-29 18:16:59 -07002673/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674void numa_default_policy(void)
2675{
David Rientjes028fec42008-04-28 02:12:25 -07002676 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677}
Paul Jackson68860ec2005-10-30 15:02:36 -08002678
Paul Jackson42253992006-01-08 01:01:59 -08002679/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002680 * Parse and format mempolicy from/to strings
2681 */
2682
2683/*
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002684 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002685 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002686static const char * const policy_modes[] =
2687{
2688 [MPOL_DEFAULT] = "default",
2689 [MPOL_PREFERRED] = "prefer",
2690 [MPOL_BIND] = "bind",
2691 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002692 [MPOL_LOCAL] = "local",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002693};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002694
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002695
2696#ifdef CONFIG_TMPFS
2697/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002698 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002699 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002700 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002701 *
2702 * Format of input:
2703 * <mode>[=<flags>][:<nodelist>]
2704 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002705 * On success, returns 0; otherwise returns 1.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002706 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002707int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002708{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002709 struct mempolicy *new = NULL;
Lee Schermerhornb4652e82010-05-24 14:32:03 -07002710 unsigned short mode;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002711 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002712 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002713 char *nodelist = strchr(str, ':');
2714 char *flags = strchr(str, '=');
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002715 int err = 1;
2716
2717 if (nodelist) {
2718 /* NUL-terminate mode or flags string */
2719 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002720 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002721 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002722 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002723 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002724 } else
2725 nodes_clear(nodes);
2726
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002727 if (flags)
2728 *flags++ = '\0'; /* terminate mode string */
2729
Peter Zijlstra479e2802012-10-25 14:16:28 +02002730 for (mode = 0; mode < MPOL_MAX; mode++) {
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002731 if (!strcmp(str, policy_modes[mode])) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002732 break;
2733 }
2734 }
Mel Gormana7200942012-11-16 09:37:58 +00002735 if (mode >= MPOL_MAX)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002736 goto out;
2737
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002738 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002739 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002740 /*
2741 * Insist on a nodelist of one node only
2742 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002743 if (nodelist) {
2744 char *rest = nodelist;
2745 while (isdigit(*rest))
2746 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002747 if (*rest)
2748 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002749 }
2750 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002751 case MPOL_INTERLEAVE:
2752 /*
2753 * Default to online nodes with memory if no nodelist
2754 */
2755 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002756 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002757 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002758 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002759 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002760 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002761 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002762 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002763 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002764 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002765 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002766 case MPOL_DEFAULT:
2767 /*
2768 * Insist on an empty nodelist
2769 */
2770 if (!nodelist)
2771 err = 0;
2772 goto out;
KOSAKI Motohirod69b2e62010-03-23 13:35:30 -07002773 case MPOL_BIND:
2774 /*
2775 * Insist on a nodelist
2776 */
2777 if (!nodelist)
2778 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002779 }
2780
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002781 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002782 if (flags) {
2783 /*
2784 * Currently, we only support two mutually exclusive
2785 * mode flags.
2786 */
2787 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002788 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002789 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002790 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002791 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002792 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002793 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002794
2795 new = mpol_new(mode, mode_flags, &nodes);
2796 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002797 goto out;
2798
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002799 /*
2800 * Save nodes for mpol_to_str() to show the tmpfs mount options
2801 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2802 */
2803 if (mode != MPOL_PREFERRED)
2804 new->v.nodes = nodes;
2805 else if (nodelist)
2806 new->v.preferred_node = first_node(nodes);
2807 else
2808 new->flags |= MPOL_F_LOCAL;
2809
2810 /*
2811 * Save nodes for contextualization: this will be used to "clone"
2812 * the mempolicy in a specific context [cpuset] at a later time.
2813 */
2814 new->w.user_nodemask = nodes;
2815
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002816 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002817
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002818out:
2819 /* Restore string for error message */
2820 if (nodelist)
2821 *--nodelist = ':';
2822 if (flags)
2823 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002824 if (!err)
2825 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002826 return err;
2827}
2828#endif /* CONFIG_TMPFS */
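/*
 * Example inputs accepted by mpol_parse_str() (comment only), in the form
 * they would take as a tmpfs "mpol=" mount option:
 *
 *	default			MPOL_DEFAULT; a nodelist is rejected
 *	prefer:2		MPOL_PREFERRED; nodelist must be one node
 *	bind=static:0-3		MPOL_BIND with MPOL_F_STATIC_NODES
 *	interleave=relative:1,3	MPOL_INTERLEAVE with MPOL_F_RELATIVE_NODES
 *	local			MPOL_PREFERRED with MPOL_F_LOCAL set
 */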
2829
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002830/**
2831 * mpol_to_str - format a mempolicy structure for printing
2832 * @buffer: to contain formatted mempolicy string
2833 * @maxlen: length of @buffer
2834 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002835 *
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002836 * Convert a mempolicy into a string.
2837 * Returns the number of characters in buffer (if positive)
2838 * or an error (negative)
2839 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002840int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002841{
2842 char *p = buffer;
2843 int l;
2844 nodemask_t nodes;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002845 unsigned short mode;
David Rientjesf5b087b2008-04-28 02:12:27 -07002846 unsigned short flags = pol ? pol->flags : 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002847
Lee Schermerhorn22919902008-04-28 02:13:22 -07002848 /*
2849 * Sanity check: room for longest mode, flag and some nodes
2850 */
2851 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2852
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002853 if (!pol || pol == &default_policy)
2854 mode = MPOL_DEFAULT;
2855 else
2856 mode = pol->mode;
2857
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002858 switch (mode) {
2859 case MPOL_DEFAULT:
2860 nodes_clear(nodes);
2861 break;
2862
2863 case MPOL_PREFERRED:
2864 nodes_clear(nodes);
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002865 if (flags & MPOL_F_LOCAL)
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002866 mode = MPOL_LOCAL;
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002867 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002868 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002869 break;
2870
2871 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07002872 /* Fall through */
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002873 case MPOL_INTERLEAVE:
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002874 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002875 break;
2876
2877 default:
Dave Jones80de7c32012-09-06 12:01:00 -04002878 return -EINVAL;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002879 }
2880
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002881 l = strlen(policy_modes[mode]);
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002882 if (buffer + maxlen < p + l + 1)
2883 return -ENOSPC;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002884
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002885 strcpy(p, policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002886 p += l;
2887
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002888 if (flags & MPOL_MODE_FLAGS) {
David Rientjesf5b087b2008-04-28 02:12:27 -07002889 if (buffer + maxlen < p + 2)
2890 return -ENOSPC;
2891 *p++ = '=';
2892
Lee Schermerhorn22919902008-04-28 02:13:22 -07002893 /*
2894 * Currently, the only defined flags are mutually exclusive
2895 */
David Rientjesf5b087b2008-04-28 02:12:27 -07002896 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07002897 p += snprintf(p, buffer + maxlen - p, "static");
2898 else if (flags & MPOL_F_RELATIVE_NODES)
2899 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07002900 }
2901
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002902 if (!nodes_empty(nodes)) {
2903 if (buffer + maxlen < p + 2)
2904 return -ENOSPC;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002905 *p++ = ':';
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002906 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2907 }
2908 return p - buffer;
2909}
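/*
 * Example outputs of mpol_to_str() (comment only) for policies freshly
 * created by mpol_parse_str() from the inputs listed above: "default",
 * "prefer:2", "bind=static:0-3", "interleave=relative:1,3" and "local".
 * The format mirrors the parser: mode name, then "=<flag>" when a mode
 * flag is set, then ":<nodelist>" when the relevant nodemask is non-empty.
 */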