/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

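/*
 * Return the task's memory policy if one is set, otherwise fall back to
 * the per-node preferred policy for the local node; return NULL if that
 * table has not been initialised yet.
 */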
static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the new nodes, the second step removes
	 * all the disallowed nodes. This way we never end up with no node
	 * to allocate from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the new nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

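/*
 * Whether the policy remembers the nodemask the user passed in, i.e.
 * whether MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES is set.
 */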
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

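/*
 * Map the relative nodemask @orig onto the allowed nodes in @rel:
 * fold @orig to the weight of @rel, then remap it onto @rel.
 */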
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

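/* Per-mode constructors, invoked through mpol_ops[].create(). */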
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checking and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

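/* MPOL_DEFAULT carries no nodemask, so there is nothing to rebind. */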
static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the new nodes, the second step removes all the
 * disallowed nodes. This way we never end up with no node to allocate
 * from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

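/* Per-mode create and rebind callbacks, indexed by policy mode. */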
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;

	spin_lock(&vma->vm_mm->page_table_lock);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(&vma->vm_mm->page_table_lock);
#else
	BUG();
#endif
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			check_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

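/*
 * Walk the page tables covering [addr, end) within @vma and apply
 * check_pte_range() (or the hugetlb variant) at the leaf level.
 */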
static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;
	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		     vma_migratable(vma))) {

			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

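/*
 * Look up which node currently backs the page mapped at @addr,
 * returning its node id or a negative error from get_user_pages().
 */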
static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

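/*
 * Allocation callback for migrate_pages(): allocate the replacement
 * page on the target @node, using the hugepage pool for hugetlb pages.
 */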
static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from, to, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = -1;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == -1)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	struct vm_area_struct *vma = (struct vm_area_struct *)private;
	unsigned long uninitialized_var(address);

	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
	return NULL;
}
#endif

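/*
 * Apply @mode/@nmask to the range [start, start + len), splitting and
 * merging VMAs as needed, and optionally migrate non-conforming pages.
 */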
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001219static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001220 unsigned short mode, unsigned short mode_flags,
1221 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001222{
1223 struct vm_area_struct *vma;
1224 struct mm_struct *mm = current->mm;
1225 struct mempolicy *new;
1226 unsigned long end;
1227 int err;
1228 LIST_HEAD(pagelist);
1229
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001230 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001231 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001232 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001233 return -EPERM;
1234
1235 if (start & ~PAGE_MASK)
1236 return -EINVAL;
1237
1238 if (mode == MPOL_DEFAULT)
1239 flags &= ~MPOL_MF_STRICT;
1240
1241 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1242 end = start + len;
1243
1244 if (end < start)
1245 return -EINVAL;
1246 if (end == start)
1247 return 0;
1248
David Rientjes028fec42008-04-28 02:12:25 -07001249 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001250 if (IS_ERR(new))
1251 return PTR_ERR(new);
1252
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001253 if (flags & MPOL_MF_LAZY)
1254 new->flags |= MPOL_F_MOF;
1255
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001256 /*
1257 * If we are using the default policy then operation
1258 * on discontinuous address spaces is okay after all
1259 */
1260 if (!new)
1261 flags |= MPOL_MF_DISCONTIG_OK;
1262
David Rientjes028fec42008-04-28 02:12:25 -07001263 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1264 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001265 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001266
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001267 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1268
1269 err = migrate_prep();
1270 if (err)
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001271 goto mpol_out;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001272 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001273 {
1274 NODEMASK_SCRATCH(scratch);
1275 if (scratch) {
1276 down_write(&mm->mmap_sem);
1277 task_lock(current);
1278 err = mpol_set_nodemask(new, nmask, scratch);
1279 task_unlock(current);
1280 if (err)
1281 up_write(&mm->mmap_sem);
1282 } else
1283 err = -ENOMEM;
1284 NODEMASK_SCRATCH_FREE(scratch);
1285 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001286 if (err)
1287 goto mpol_out;
1288
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001289 vma = check_range(mm, start, end, nmask,
1290 flags | MPOL_MF_INVERT, &pagelist);
1291
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001292 err = PTR_ERR(vma); /* check_range() may have returned an error */
Mel Gormana7200942012-11-16 09:37:58 +00001293 if (!IS_ERR(vma))
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -08001294 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001295
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001296 if (!err) {
1297 int nr_failed = 0;
1298
Minchan Kimcf608ac2010-10-26 14:21:29 -07001299 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001300 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Christoph Lameter95a402c2006-06-23 02:03:53 -07001301 nr_failed = migrate_pages(&pagelist, new_vma_page,
Hugh Dickins9c620e22013-02-22 16:35:14 -08001302 (unsigned long)vma,
1303 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001304 if (nr_failed)
1305 putback_lru_pages(&pagelist);
1306 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001307
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001308 if (nr_failed && (flags & MPOL_MF_STRICT))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001309 err = -EIO;
KOSAKI Motohiroab8a3e12009-10-26 16:49:58 -07001310 } else
1311 putback_lru_pages(&pagelist);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001312
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001313 up_write(&mm->mmap_sem);
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001314 mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001315 mpol_put(new);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001316 return err;
1317}
1318
Christoph Lameter39743882006-01-08 01:00:51 -08001319/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001320 * User space interface with variable sized bitmaps for nodelists.
1321 */
1322
1323/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001324static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001325 unsigned long maxnode)
1326{
1327 unsigned long k;
1328 unsigned long nlongs;
1329 unsigned long endmask;
1330
1331 --maxnode;
1332 nodes_clear(*nodes);
1333 if (maxnode == 0 || !nmask)
1334 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001335 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001336 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001337
1338 nlongs = BITS_TO_LONGS(maxnode);
1339 if ((maxnode % BITS_PER_LONG) == 0)
1340 endmask = ~0UL;
1341 else
1342 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1343
1344 /* When the user specified more nodes than supported, just check
1345 that the unsupported part is all zero. */
1346 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1347 if (nlongs > PAGE_SIZE/sizeof(long))
1348 return -EINVAL;
1349 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1350 unsigned long t;
1351 if (get_user(t, nmask + k))
1352 return -EFAULT;
1353 if (k == nlongs - 1) {
1354 if (t & endmask)
1355 return -EINVAL;
1356 } else if (t)
1357 return -EINVAL;
1358 }
1359 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1360 endmask = ~0UL;
1361 }
1362
1363 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1364 return -EFAULT;
1365 nodes_addr(*nodes)[nlongs-1] &= endmask;
1366 return 0;
1367}
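/*
 * A worked example of the mask copy above, assuming 64-bit longs and
 * MAX_NUMNODES == 64: a caller passing maxnode = 17 ends up with
 * maxnode = 16 after the decrement, nlongs = 1 and
 * endmask = (1UL << 16) - 1, so only bits for nodes 0..15 survive the
 * final "&= endmask".  Oversized masks are tolerated as long as the
 * words beyond MAX_NUMNODES are zero, as checked in the loop above.
 */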
1368
1369/* Copy a kernel node mask to user space */
1370static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1371 nodemask_t *nodes)
1372{
1373 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1374 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1375
1376 if (copy > nbytes) {
1377 if (copy > PAGE_SIZE)
1378 return -EINVAL;
1379 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1380 return -EFAULT;
1381 copy = nbytes;
1382 }
1383 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1384}
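/*
 * Sizing sketch for the copy-out above, assuming 64-bit longs and
 * MAX_NUMNODES == 64: for maxnode = 1024 the user buffer spans
 * copy = ALIGN(1023, 64) / 8 = 128 bytes while the kernel mask is only
 * nbytes = 8 bytes, so the trailing 120 bytes are cleared with
 * clear_user() and just the real 8-byte nodemask is copied out.
 */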
1385
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001386SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1387 unsigned long, mode, unsigned long __user *, nmask,
1388 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001389{
1390 nodemask_t nodes;
1391 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001392 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001393
David Rientjes028fec42008-04-28 02:12:25 -07001394 mode_flags = mode & MPOL_MODE_FLAGS;
1395 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001396 if (mode >= MPOL_MAX)
1397 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001398 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1399 (mode_flags & MPOL_F_RELATIVE_NODES))
1400 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001401 err = get_nodes(&nodes, nmask, maxnode);
1402 if (err)
1403 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001404 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001405}
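/*
 * From user space this entry point is usually reached through the
 * libnuma <numaif.h> wrapper or a raw syscall.  A minimal sketch,
 * assuming a page-aligned mapping "p" of "length" bytes and node 0:
 *
 *	unsigned long nodes = 1;
 *	if (mbind(p, length, MPOL_BIND, &nodes, 8, MPOL_MF_STRICT))
 *		perror("mbind");
 *
 * The maxnode argument (8 here) must be large enough to cover the
 * highest node bit used after the "--maxnode" in get_nodes() above.
 */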
1406
1407/* Set the process memory policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001408SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1409 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001410{
1411 int err;
1412 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001413 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001414
David Rientjes028fec42008-04-28 02:12:25 -07001415 flags = mode & MPOL_MODE_FLAGS;
1416 mode &= ~MPOL_MODE_FLAGS;
1417 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001418 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001419 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1420 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001421 err = get_nodes(&nodes, nmask, maxnode);
1422 if (err)
1423 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001424 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001425}
1426
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001427SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1428 const unsigned long __user *, old_nodes,
1429 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001430{
David Howellsc69e8d92008-11-14 10:39:19 +11001431 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001432 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001433 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001434 nodemask_t task_nodes;
1435 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001436 nodemask_t *old;
1437 nodemask_t *new;
1438 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001439
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001440 if (!scratch)
1441 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001442
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001443 old = &scratch->mask1;
1444 new = &scratch->mask2;
1445
1446 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001447 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001448 goto out;
1449
1450 err = get_nodes(new, new_nodes, maxnode);
1451 if (err)
1452 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001453
1454 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001455 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001456 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001457 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001458 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001459 err = -ESRCH;
1460 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001461 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001462 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001463
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001464 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001465
1466 /*
1467 * Check if this process has the right to modify the specified
1468 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001469 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001470 * userid as the target process.
1471 */
David Howellsc69e8d92008-11-14 10:39:19 +11001472 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001473 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1474 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001475 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001476 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001477 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001478 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001479 }
David Howellsc69e8d92008-11-14 10:39:19 +11001480 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001481
1482 task_nodes = cpuset_mems_allowed(task);
1483 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001484 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001485 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001486 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001487 }
1488
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08001489 if (!nodes_subset(*new, node_states[N_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001490 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001491 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001492 }
1493
David Quigley86c3a762006-06-23 02:04:02 -07001494 err = security_task_movememory(task);
1495 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001496 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001497
Christoph Lameter3268c632012-03-21 16:34:06 -07001498 mm = get_task_mm(task);
1499 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001500
1501 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001502 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001503 goto out;
1504 }
1505
1506 err = do_migrate_pages(mm, old, new,
1507 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001508
1509 mmput(mm);
1510out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001511 NODEMASK_SCRATCH_FREE(scratch);
1512
Christoph Lameter39743882006-01-08 01:00:51 -08001513 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001514
1515out_put:
1516 put_task_struct(task);
1517 goto out;
1518
Christoph Lameter39743882006-01-08 01:00:51 -08001519}
1520
1521
Christoph Lameter8bccd852005-10-29 18:16:59 -07001522/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001523SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1524 unsigned long __user *, nmask, unsigned long, maxnode,
1525 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001526{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001527 int err;
1528 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001529 nodemask_t nodes;
1530
1531 if (nmask != NULL && maxnode < MAX_NUMNODES)
1532 return -EINVAL;
1533
1534 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1535
1536 if (err)
1537 return err;
1538
1539 if (policy && put_user(pval, policy))
1540 return -EFAULT;
1541
1542 if (nmask)
1543 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1544
1545 return err;
1546}
1547
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548#ifdef CONFIG_COMPAT
1549
1550asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1551 compat_ulong_t __user *nmask,
1552 compat_ulong_t maxnode,
1553 compat_ulong_t addr, compat_ulong_t flags)
1554{
1555 long err;
1556 unsigned long __user *nm = NULL;
1557 unsigned long nr_bits, alloc_size;
1558 DECLARE_BITMAP(bm, MAX_NUMNODES);
1559
1560 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1561 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1562
1563 if (nmask)
1564 nm = compat_alloc_user_space(alloc_size);
1565
1566 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1567
1568 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001569 unsigned long copy_size;
1570 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1571 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 /* ensure entire bitmap is zeroed */
1573 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1574 err |= compat_put_bitmap(nmask, bm, nr_bits);
1575 }
1576
1577 return err;
1578}
1579
1580asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1581 compat_ulong_t maxnode)
1582{
1583 long err = 0;
1584 unsigned long __user *nm = NULL;
1585 unsigned long nr_bits, alloc_size;
1586 DECLARE_BITMAP(bm, MAX_NUMNODES);
1587
1588 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1589 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1590
1591 if (nmask) {
1592 err = compat_get_bitmap(bm, nmask, nr_bits);
1593 nm = compat_alloc_user_space(alloc_size);
1594 err |= copy_to_user(nm, bm, alloc_size);
1595 }
1596
1597 if (err)
1598 return -EFAULT;
1599
1600 return sys_set_mempolicy(mode, nm, nr_bits+1);
1601}
1602
1603asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1604 compat_ulong_t mode, compat_ulong_t __user *nmask,
1605 compat_ulong_t maxnode, compat_ulong_t flags)
1606{
1607 long err = 0;
1608 unsigned long __user *nm = NULL;
1609 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001610 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
1612 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1613 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1614
1615 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001616 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001618 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 }
1620
1621 if (err)
1622 return -EFAULT;
1623
1624 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1625}
1626
1627#endif
1628
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001629/*
1630 * get_vma_policy(@task, @vma, @addr)
1631 * @task - task for fallback if vma policy == default
1632 * @vma - virtual memory area whose policy is sought
1633 * @addr - address in @vma for shared policy lookup
1634 *
1635 * Returns effective policy for a VMA at specified address.
1636 * Falls back to @task or system default policy, as necessary.
David Rientjes32f85162012-10-16 17:31:23 -07001637 * The current or another task's task mempolicy and non-shared vma policies
1638 * must be protected by task_lock(task), held by the caller.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001639 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1640 * count--added by the get_policy() vm_op, as appropriate--to protect against
1641 * freeing by another task. It is the caller's responsibility to free the
1642 * extra reference for shared policies.
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001643 */
Stephen Wilsond98f6cb2011-05-24 17:12:41 -07001644struct mempolicy *get_vma_policy(struct task_struct *task,
Christoph Lameter48fce342006-01-08 01:01:03 -08001645 struct vm_area_struct *vma, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646{
Mel Gorman5606e382012-11-02 18:19:13 +00001647 struct mempolicy *pol = get_task_policy(task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
1649 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001650 if (vma->vm_ops && vma->vm_ops->get_policy) {
Lee Schermerhornae4d8c12008-04-28 02:13:11 -07001651 struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1652 addr);
1653 if (vpol)
1654 pol = vpol;
Mel Gorman00442ad2012-10-08 16:29:20 -07001655 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001657
1658 /*
1659 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1660 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1661 * count on these policies which will be dropped by
1662 * mpol_cond_put() later
1663 */
1664 if (mpol_needs_cond_ref(pol))
1665 mpol_get(pol);
1666 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 }
1668 if (!pol)
1669 pol = &default_policy;
1670 return pol;
1671}
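/*
 * Callers of get_vma_policy() in this file normally drop the result with
 * mpol_cond_put(), which only releases the extra reference when the
 * policy is marked MPOL_F_SHARED and therefore actually took one here.
 */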
1672
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001673static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1674{
1675 enum zone_type dynamic_policy_zone = policy_zone;
1676
1677 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1678
1679 /*
1680 * if policy->v.nodes has movable memory only,
1681 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1682 *
1683 * policy->v.nodes is intersected with node_states[N_MEMORY],
1684 * so if the following test fails, it implies
1685 * policy->v.nodes has movable memory only.
1686 */
1687 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1688 dynamic_policy_zone = ZONE_MOVABLE;
1689
1690 return zone >= dynamic_policy_zone;
1691}
1692
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001693/*
1694 * Return a nodemask representing a mempolicy for filtering nodes for
1695 * page allocation
1696 */
1697static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001698{
1699 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001700 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001701 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001702 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1703 return &policy->v.nodes;
1704
1705 return NULL;
1706}
1707
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001708/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001709static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1710 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001712 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001714 if (!(policy->flags & MPOL_F_LOCAL))
1715 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 break;
1717 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001718 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001719 * Normally, MPOL_BIND allocations are node-local within the
1720 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001721 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001722 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001723 */
Mel Gorman19770b32008-04-28 02:12:18 -07001724 if (unlikely(gfp & __GFP_THISNODE) &&
1725 unlikely(!node_isset(nd, policy->v.nodes)))
1726 nd = first_node(policy->v.nodes);
1727 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 BUG();
1730 }
Mel Gorman0e884602008-04-28 02:12:14 -07001731 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732}
1733
1734/* Do dynamic interleaving for a process */
1735static unsigned interleave_nodes(struct mempolicy *policy)
1736{
1737 unsigned nid, next;
1738 struct task_struct *me = current;
1739
1740 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001741 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001743 next = first_node(policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001744 if (next < MAX_NUMNODES)
1745 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 return nid;
1747}
1748
Christoph Lameterdc85da12006-01-18 17:42:36 -08001749/*
1750 * Depending on the memory policy provide a node from which to allocate the
1751 * next slab entry.
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001752 * @policy must be protected from freeing by the caller. If @policy is
1753 * the current task's mempolicy, this protection is implicit, as only the
1754 * task can change its policy. The system default policy requires no
1755 * such protection.
Christoph Lameterdc85da12006-01-18 17:42:36 -08001756 */
Andi Kleene7b691b2012-06-09 02:40:03 -07001757unsigned slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001758{
Andi Kleene7b691b2012-06-09 02:40:03 -07001759 struct mempolicy *policy;
1760
1761 if (in_interrupt())
1762 return numa_node_id();
1763
1764 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001765 if (!policy || policy->flags & MPOL_F_LOCAL)
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001766 return numa_node_id();
Christoph Lameter765c4502006-09-27 01:50:08 -07001767
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001768 switch (policy->mode) {
1769 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001770 /*
1771 * handled MPOL_F_LOCAL above
1772 */
1773 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001774
Christoph Lameterdc85da12006-01-18 17:42:36 -08001775 case MPOL_INTERLEAVE:
1776 return interleave_nodes(policy);
1777
Mel Gormandd1a2392008-04-28 02:12:17 -07001778 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001779 /*
1780 * Follow bind policy behavior and start allocation at the
1781 * first node.
1782 */
Mel Gorman19770b32008-04-28 02:12:18 -07001783 struct zonelist *zonelist;
1784 struct zone *zone;
1785 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1786 zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1787 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1788 &policy->v.nodes,
1789 &zone);
Eric Dumazet800416f2010-10-27 19:33:43 +02001790 return zone ? zone->node : numa_node_id();
Mel Gormandd1a2392008-04-28 02:12:17 -07001791 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001792
Christoph Lameterdc85da12006-01-18 17:42:36 -08001793 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001794 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001795 }
1796}
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798/* Do static interleaving for a VMA with known offset. */
1799static unsigned offset_il_node(struct mempolicy *pol,
1800 struct vm_area_struct *vma, unsigned long off)
1801{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001802 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001803 unsigned target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 int c;
1805 int nid = -1;
1806
David Rientjesf5b087b2008-04-28 02:12:27 -07001807 if (!nnodes)
1808 return numa_node_id();
1809 target = (unsigned int)off % nnodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 c = 0;
1811 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001812 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 c++;
1814 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 return nid;
1816}
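/*
 * For illustration: with pol->v.nodes = {0,2,5} we get nnodes = 3; an
 * offset of 7 gives target = 7 % 3 = 1, the do/while walk visits node 0
 * (c = 1) and then node 2 (c = 2), so the page is placed on node 2.
 */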
1817
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001818/* Determine a node number for interleave */
1819static inline unsigned interleave_nid(struct mempolicy *pol,
1820 struct vm_area_struct *vma, unsigned long addr, int shift)
1821{
1822 if (vma) {
1823 unsigned long off;
1824
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001825 /*
1826 * for small pages, there is no difference between
1827 * shift and PAGE_SHIFT, so the bit-shift is safe.
1828 * for huge pages, since vm_pgoff is in units of small
1829 * pages, we need to shift off the always 0 bits to get
1830 * a useful offset.
1831 */
1832 BUG_ON(shift < PAGE_SHIFT);
1833 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001834 off += (addr - vma->vm_start) >> shift;
1835 return offset_il_node(pol, vma, off);
1836 } else
1837 return interleave_nodes(pol);
1838}
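/*
 * Example of the shift handling above: for 2 MB huge pages (shift = 21)
 * on a 4 KB base-page kernel (PAGE_SHIFT = 12), vm_pgoff is shifted
 * right by 9 so that consecutive huge pages get consecutive interleave
 * offsets rather than being 512 small-page units apart.
 */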
1839
Michal Hocko778d3b02011-07-26 16:08:30 -07001840/*
1841 * Return the bit number of a random bit set in the nodemask.
1842 * (returns -1 if nodemask is empty)
1843 */
1844int node_random(const nodemask_t *maskp)
1845{
1846 int w, bit = -1;
1847
1848 w = nodes_weight(*maskp);
1849 if (w)
1850 bit = bitmap_ord_to_pos(maskp->bits,
1851 get_random_int() % w, MAX_NUMNODES);
1852 return bit;
1853}
1854
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001855#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001856/*
1857 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1858 * @vma = virtual memory area whose policy is sought
1859 * @addr = address in @vma for shared policy lookup and interleave policy
1860 * @gfp_flags = for requested zone
Mel Gorman19770b32008-04-28 02:12:18 -07001861 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1862 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001863 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001864 * Returns a zonelist suitable for a huge page allocation and a pointer
1865 * to the struct mempolicy for conditional unref after allocation.
1866 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1867 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001868 *
1869 * Must be protected by get_mems_allowed()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001870 */
Mel Gorman396faf02007-07-17 04:03:13 -07001871struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001872 gfp_t gfp_flags, struct mempolicy **mpol,
1873 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001874{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001875 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001876
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001877 *mpol = get_vma_policy(current, vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001878 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001879
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001880 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1881 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001882 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001883 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001884 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001885 if ((*mpol)->mode == MPOL_BIND)
1886 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001887 }
1888 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001889}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001890
1891/*
1892 * init_nodemask_of_mempolicy
1893 *
1894 * If the current task's mempolicy is "default" [NULL], return 'false'
1895 * to indicate default policy. Otherwise, extract the policy nodemask
1896 * for 'bind' or 'interleave' policy into the argument nodemask, or
1897 * initialize the argument nodemask to contain the single node for
1898 * 'preferred' or 'local' policy and return 'true' to indicate presence
1899 * of non-default mempolicy.
1900 *
1901 * We don't bother with reference counting the mempolicy [mpol_get/put]
1902 * because the current task is examining its own mempolicy and a task's
1903 * mempolicy is only ever changed by the task itself.
1904 *
1905 * N.B., it is the caller's responsibility to free a returned nodemask.
1906 */
1907bool init_nodemask_of_mempolicy(nodemask_t *mask)
1908{
1909 struct mempolicy *mempolicy;
1910 int nid;
1911
1912 if (!(mask && current->mempolicy))
1913 return false;
1914
Miao Xiec0ff7452010-05-24 14:32:08 -07001915 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001916 mempolicy = current->mempolicy;
1917 switch (mempolicy->mode) {
1918 case MPOL_PREFERRED:
1919 if (mempolicy->flags & MPOL_F_LOCAL)
1920 nid = numa_node_id();
1921 else
1922 nid = mempolicy->v.preferred_node;
1923 init_nodemask_of_node(mask, nid);
1924 break;
1925
1926 case MPOL_BIND:
1927 /* Fall through */
1928 case MPOL_INTERLEAVE:
1929 *mask = mempolicy->v.nodes;
1930 break;
1931
1932 default:
1933 BUG();
1934 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001935 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001936
1937 return true;
1938}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001939#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001940
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001941/*
1942 * mempolicy_nodemask_intersects
1943 *
1944 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1945 * policy. Otherwise, check for intersection between mask and the policy
1946 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1947 * policy, always return true since it may allocate elsewhere on fallback.
1948 *
1949 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1950 */
1951bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1952 const nodemask_t *mask)
1953{
1954 struct mempolicy *mempolicy;
1955 bool ret = true;
1956
1957 if (!mask)
1958 return ret;
1959 task_lock(tsk);
1960 mempolicy = tsk->mempolicy;
1961 if (!mempolicy)
1962 goto out;
1963
1964 switch (mempolicy->mode) {
1965 case MPOL_PREFERRED:
1966 /*
1967 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1968 * allocate from, they may fallback to other nodes when oom.
1969 * Thus, it's possible for tsk to have allocated memory from
1970 * nodes in mask.
1971 */
1972 break;
1973 case MPOL_BIND:
1974 case MPOL_INTERLEAVE:
1975 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1976 break;
1977 default:
1978 BUG();
1979 }
1980out:
1981 task_unlock(tsk);
1982 return ret;
1983}
1984
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985/* Allocate a page in interleaved policy.
1986 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001987static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1988 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989{
1990 struct zonelist *zl;
1991 struct page *page;
1992
Mel Gorman0e884602008-04-28 02:12:14 -07001993 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001995 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001996 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 return page;
1998}
1999
2000/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002001 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 *
2003 * @gfp:
2004 * %GFP_USER user allocation.
2005 * %GFP_KERNEL kernel allocations,
2006 * %GFP_HIGHMEM highmem/user allocations,
2007 * %GFP_FS allocation should not call back into a file system.
2008 * %GFP_ATOMIC don't sleep.
2009 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002010 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 * @vma: Pointer to VMA or NULL if not available.
2012 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2013 *
2014 * This function allocates a page from the kernel page pool and applies
2015 * a NUMA policy associated with the VMA or the current process.
2016 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2017 * mm_struct of the VMA to prevent it from going away. Should be used for
2018 * all allocations for pages that will be mapped into
2019 * user space. Returns NULL when no page can be allocated.
2020 *
2021 * Should be called with the mmap_sem of the vma held.
2022 */
2023struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002024alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Andi Kleen2f5f9482011-03-04 17:36:29 -08002025 unsigned long addr, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002027 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002028 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002029 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
Mel Gormancc9a6c82012-03-21 16:34:11 -07002031retry_cpuset:
2032 pol = get_vma_policy(current, vma, addr);
2033 cpuset_mems_cookie = get_mems_allowed();
2034
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002035 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002037
Andi Kleen8eac5632011-02-25 14:44:28 -08002038 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002039 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002040 page = alloc_page_interleave(gfp, order, nid);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002041 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2042 goto retry_cpuset;
2043
Miao Xiec0ff7452010-05-24 14:32:08 -07002044 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 }
David Rientjes212a0a62012-12-11 16:02:51 -08002046 page = __alloc_pages_nodemask(gfp, order,
2047 policy_zonelist(gfp, pol, node),
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002048 policy_nodemask(gfp, pol));
David Rientjes212a0a62012-12-11 16:02:51 -08002049 if (unlikely(mpol_needs_cond_ref(pol)))
2050 __mpol_put(pol);
Mel Gormancc9a6c82012-03-21 16:34:11 -07002051 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2052 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002053 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054}
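/*
 * Note on the retry_cpuset loop above (and the identical one in
 * alloc_pages_current() below): get_mems_allowed() returns a cookie
 * describing the task's current cpuset constraints; if the allocation
 * came back empty and put_mems_allowed() reports that the constraints
 * changed in the meantime, the policy lookup and allocation are redone.
 */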
2055
2056/**
2057 * alloc_pages_current - Allocate pages.
2058 *
2059 * @gfp:
2060 * %GFP_USER user allocation,
2061 * %GFP_KERNEL kernel allocation,
2062 * %GFP_HIGHMEM highmem allocation,
2063 * %GFP_FS don't call back into a file system.
2064 * %GFP_ATOMIC don't sleep.
2065 * @order: Power of two of allocation size in pages. 0 is a single page.
2066 *
2067 * Allocate a page from the kernel page pool and, when not in
2068 * interrupt context, apply the current process' NUMA policy.
2069 * Returns NULL when no page can be allocated.
2070 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002071 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 * 1) it's ok to take cpuset_sem (can WAIT), and
2073 * 2) allocating for current task (not interrupt).
2074 */
Al Virodd0fc662005-10-07 07:46:04 +01002075struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076{
Mel Gorman5606e382012-11-02 18:19:13 +00002077 struct mempolicy *pol = get_task_policy(current);
Miao Xiec0ff7452010-05-24 14:32:08 -07002078 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002079 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
Christoph Lameter9b819d22006-09-25 23:31:40 -07002081 if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082 pol = &default_policy;
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002083
Mel Gormancc9a6c82012-03-21 16:34:11 -07002084retry_cpuset:
2085 cpuset_mems_cookie = get_mems_allowed();
2086
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002087 /*
2088 * No reference counting needed for current->mempolicy
2089 * nor system default_policy
2090 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002091 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002092 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2093 else
2094 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002095 policy_zonelist(gfp, pol, numa_node_id()),
2096 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002097
2098 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2099 goto retry_cpuset;
2100
Miao Xiec0ff7452010-05-24 14:32:08 -07002101 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102}
2103EXPORT_SYMBOL(alloc_pages_current);
2104
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002105int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2106{
2107 struct mempolicy *pol = mpol_dup(vma_policy(src));
2108
2109 if (IS_ERR(pol))
2110 return PTR_ERR(pol);
2111 dst->vm_policy = pol;
2112 return 0;
2113}
2114
Paul Jackson42253992006-01-08 01:01:59 -08002115/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002116 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002117 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2118 * with the mems_allowed returned by cpuset_mems_allowed(). This
2119 * keeps mempolicies cpuset relative after its cpuset moves. See
2120 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002121 *
2122 * current's mempolicy may be rebound by another task (the task that changes
2123 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002124 */
Paul Jackson42253992006-01-08 01:01:59 -08002125
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002126/* Slow path of a mempolicy duplicate */
2127struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128{
2129 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2130
2131 if (!new)
2132 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002133
2134 /* task's mempolicy is protected by alloc_lock */
2135 if (old == current->mempolicy) {
2136 task_lock(current);
2137 *new = *old;
2138 task_unlock(current);
2139 } else
2140 *new = *old;
2141
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002142 rcu_read_lock();
Paul Jackson42253992006-01-08 01:01:59 -08002143 if (current_cpuset_is_being_rebound()) {
2144 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002145 if (new->flags & MPOL_F_REBINDING)
2146 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2147 else
2148 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002149 }
Paul E. McKenney99ee4ca2010-03-03 17:50:17 -08002150 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 return new;
2153}
2154
2155/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002156bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157{
2158 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002159 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002160 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002161 return false;
Bob Liu19800502010-05-24 14:32:01 -07002162 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002163 return false;
Bob Liu19800502010-05-24 14:32:01 -07002164 if (mpol_store_user_nodemask(a))
2165 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002166 return false;
Bob Liu19800502010-05-24 14:32:01 -07002167
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002168 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002169 case MPOL_BIND:
2170 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002172 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002174 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 default:
2176 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002177 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 }
2179}
2180
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 * Shared memory backing store policy support.
2183 *
2184 * Remember policies even when nobody has shared memory mapped.
2185 * The policies are kept in Red-Black tree linked from the inode.
2186 * They are protected by the sp->lock spinlock, which should be held
2187 * for any accesses to the tree.
2188 */
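/*
 * The start/end values stored below are in units of page offsets into
 * the backing object, as seen in mpol_set_shared_policy() which indexes
 * by vma->vm_pgoff and vma_pages(vma).
 */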
2189
2190/* lookup first element intersecting start-end */
Mel Gorman42288fe2012-12-21 23:10:25 +00002191/* Caller holds sp->lock */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192static struct sp_node *
2193sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2194{
2195 struct rb_node *n = sp->root.rb_node;
2196
2197 while (n) {
2198 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2199
2200 if (start >= p->end)
2201 n = n->rb_right;
2202 else if (end <= p->start)
2203 n = n->rb_left;
2204 else
2205 break;
2206 }
2207 if (!n)
2208 return NULL;
2209 for (;;) {
2210 struct sp_node *w = NULL;
2211 struct rb_node *prev = rb_prev(n);
2212 if (!prev)
2213 break;
2214 w = rb_entry(prev, struct sp_node, nd);
2215 if (w->end <= start)
2216 break;
2217 n = prev;
2218 }
2219 return rb_entry(n, struct sp_node, nd);
2220}
2221
2222/* Insert a new shared policy into the list. */
2223/* Caller holds sp->lock */
2224static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2225{
2226 struct rb_node **p = &sp->root.rb_node;
2227 struct rb_node *parent = NULL;
2228 struct sp_node *nd;
2229
2230 while (*p) {
2231 parent = *p;
2232 nd = rb_entry(parent, struct sp_node, nd);
2233 if (new->start < nd->start)
2234 p = &(*p)->rb_left;
2235 else if (new->end > nd->end)
2236 p = &(*p)->rb_right;
2237 else
2238 BUG();
2239 }
2240 rb_link_node(&new->nd, parent, p);
2241 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002242 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002243 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244}
2245
2246/* Find shared policy intersecting idx */
2247struct mempolicy *
2248mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2249{
2250 struct mempolicy *pol = NULL;
2251 struct sp_node *sn;
2252
2253 if (!sp->root.rb_node)
2254 return NULL;
Mel Gorman42288fe2012-12-21 23:10:25 +00002255 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 sn = sp_lookup(sp, idx, idx+1);
2257 if (sn) {
2258 mpol_get(sn->policy);
2259 pol = sn->policy;
2260 }
Mel Gorman42288fe2012-12-21 23:10:25 +00002261 spin_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 return pol;
2263}
2264
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002265static void sp_free(struct sp_node *n)
2266{
2267 mpol_put(n->policy);
2268 kmem_cache_free(sn_cache, n);
2269}
2270
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002271/**
2272 * mpol_misplaced - check whether current page node is valid in policy
2273 *
2274 * @page - page to be checked
2275 * @vma - vm area where page mapped
2276 * @addr - virtual address where page mapped
2277 *
2278 * Lookup current policy node id for vma,addr and "compare to" page's
2279 * node id.
2280 *
2281 * Returns:
2282 * -1 - not misplaced, page is in the right node
2283 * node - node id where the page should be
2284 *
2285 * Policy determination "mimics" alloc_page_vma().
2286 * Called from fault path where we know the vma and faulting address.
2287 */
2288int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2289{
2290 struct mempolicy *pol;
2291 struct zone *zone;
2292 int curnid = page_to_nid(page);
2293 unsigned long pgoff;
2294 int polnid = -1;
2295 int ret = -1;
2296
2297 BUG_ON(!vma);
2298
2299 pol = get_vma_policy(current, vma, addr);
2300 if (!(pol->flags & MPOL_F_MOF))
2301 goto out;
2302
2303 switch (pol->mode) {
2304 case MPOL_INTERLEAVE:
2305 BUG_ON(addr >= vma->vm_end);
2306 BUG_ON(addr < vma->vm_start);
2307
2308 pgoff = vma->vm_pgoff;
2309 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2310 polnid = offset_il_node(pol, vma, pgoff);
2311 break;
2312
2313 case MPOL_PREFERRED:
2314 if (pol->flags & MPOL_F_LOCAL)
2315 polnid = numa_node_id();
2316 else
2317 polnid = pol->v.preferred_node;
2318 break;
2319
2320 case MPOL_BIND:
2321 /*
2322 * allows binding to multiple nodes.
2323 * use current page if in policy nodemask,
2324 * else select nearest allowed node, if any.
2325 * If no allowed nodes, use current [!misplaced].
2326 */
2327 if (node_isset(curnid, pol->v.nodes))
2328 goto out;
2329 (void)first_zones_zonelist(
2330 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2331 gfp_zone(GFP_HIGHUSER),
2332 &pol->v.nodes, &zone);
2333 polnid = zone->node;
2334 break;
2335
2336 default:
2337 BUG();
2338 }
Mel Gorman5606e382012-11-02 18:19:13 +00002339
2340 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002341 if (pol->flags & MPOL_F_MORON) {
2342 int last_nid;
2343
Mel Gorman5606e382012-11-02 18:19:13 +00002344 polnid = numa_node_id();
2345
Mel Gormane42c8ff2012-11-12 09:17:07 +00002346 /*
2347 * Multi-stage node selection is used in conjunction
2348 * with a periodic migration fault to build a temporal
2349 * task<->page relation. By using a two-stage filter we
2350 * remove short/unlikely relations.
2351 *
2352 * Using P(p) ~ n_p / n_t as per frequentist
2353 * probability, we can equate a task's usage of a
2354 * particular page (n_p) per total usage of this
2355 * page (n_t) (in a given time-span) to a probability.
2356 *
2357 * Our periodic faults will sample this probability and
2358 * getting the same result twice in a row, given these
2359 * samples are fully independent, is then given by
2360 * P(n)^2, provided our sample period is sufficiently
2361 * short compared to the usage pattern.
2362 *
2363 * This quadratic squishes small probabilities, making
2364 * it less likely we act on an unlikely task<->page
2365 * relation.
2366 */
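/*
 * Numeric sketch of the filter: a page a task touches in only 10% of
 * its sampling faults (P(p) = 0.1) triggers a migration on roughly 1%
 * of samples once two consecutive hits are required, while a page with
 * P(p) = 0.9 still passes about 81% of the time.
 */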
Mel Gorman22b751c2013-02-22 16:34:59 -08002367 last_nid = page_nid_xchg_last(page, polnid);
Mel Gormane42c8ff2012-11-12 09:17:07 +00002368 if (last_nid != polnid)
2369 goto out;
2370 }
2371
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002372 if (curnid != polnid)
2373 ret = polnid;
2374out:
2375 mpol_cond_put(pol);
2376
2377 return ret;
2378}
2379
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2381{
Paul Mundt140d5a42007-07-15 23:38:16 -07002382 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002384 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385}
2386
Mel Gorman42288fe2012-12-21 23:10:25 +00002387static void sp_node_init(struct sp_node *node, unsigned long start,
2388 unsigned long end, struct mempolicy *pol)
2389{
2390 node->start = start;
2391 node->end = end;
2392 node->policy = pol;
2393}
2394
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002395static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2396 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002398 struct sp_node *n;
2399 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002401 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 if (!n)
2403 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002404
2405 newpol = mpol_dup(pol);
2406 if (IS_ERR(newpol)) {
2407 kmem_cache_free(sn_cache, n);
2408 return NULL;
2409 }
2410 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002411 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002412
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 return n;
2414}
2415
2416/* Replace a policy range. */
2417static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2418 unsigned long end, struct sp_node *new)
2419{
Mel Gormanb22d1272012-10-08 16:29:17 -07002420 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002421 struct sp_node *n_new = NULL;
2422 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002423 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
Mel Gorman42288fe2012-12-21 23:10:25 +00002425restart:
2426 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 n = sp_lookup(sp, start, end);
2428 /* Take care of old policies in the same range. */
2429 while (n && n->start < end) {
2430 struct rb_node *next = rb_next(&n->nd);
2431 if (n->start >= start) {
2432 if (n->end <= end)
2433 sp_delete(sp, n);
2434 else
2435 n->start = end;
2436 } else {
2437 /* Old policy spanning whole new range. */
2438 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002439 if (!n_new)
2440 goto alloc_new;
2441
2442 *mpol_new = *n->policy;
2443 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002444 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002446 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002447 n_new = NULL;
2448 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 break;
2450 } else
2451 n->end = start;
2452 }
2453 if (!next)
2454 break;
2455 n = rb_entry(next, struct sp_node, nd);
2456 }
2457 if (new)
2458 sp_insert(sp, new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002459 spin_unlock(&sp->lock);
2460 ret = 0;
2461
2462err_out:
2463 if (mpol_new)
2464 mpol_put(mpol_new);
2465 if (n_new)
2466 kmem_cache_free(sn_cache, n_new);
2467
Mel Gormanb22d1272012-10-08 16:29:17 -07002468 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002469
2470alloc_new:
2471 spin_unlock(&sp->lock);
2472 ret = -ENOMEM;
2473 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2474 if (!n_new)
2475 goto err_out;
2476 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2477 if (!mpol_new)
2478 goto err_out;
2479 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480}
2481
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002482/**
2483 * mpol_shared_policy_init - initialize shared policy for inode
2484 * @sp: pointer to inode shared policy
2485 * @mpol: struct mempolicy to install
2486 *
2487 * Install non-NULL @mpol in inode's shared policy rb-tree.
2488 * On entry, the current task has a reference on a non-NULL @mpol.
2489 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002490 * This is called during get_inode(), so GFP_KERNEL can be used.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002491 */
2492void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002493{
Miao Xie58568d22009-06-16 15:31:49 -07002494 int ret;
2495
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002496 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Mel Gorman42288fe2012-12-21 23:10:25 +00002497 spin_lock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002498
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002499 if (mpol) {
2500 struct vm_area_struct pvma;
2501 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002502 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002503
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002504 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002505 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002506 /* contextualize the tmpfs mount point mempolicy */
2507 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002508 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002509 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002510
2511 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002512 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002513 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002514 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002515 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002516
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002517 /* Create pseudo-vma that contains just the policy */
2518 memset(&pvma, 0, sizeof(struct vm_area_struct));
2519 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2520 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002521
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002522put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002523 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002524free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002525 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002526put_mpol:
2527 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002528 }
2529}
2530
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531int mpol_set_shared_policy(struct shared_policy *info,
2532 struct vm_area_struct *vma, struct mempolicy *npol)
2533{
2534 int err;
2535 struct sp_node *new = NULL;
2536 unsigned long sz = vma_pages(vma);
2537
David Rientjes028fec42008-04-28 02:12:25 -07002538 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002540 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002541 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002542 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
2544 if (npol) {
2545 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2546 if (!new)
2547 return -ENOMEM;
2548 }
2549 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2550 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002551 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 return err;
2553}
2554
2555/* Free a backing policy store on inode delete. */
2556void mpol_free_shared_policy(struct shared_policy *p)
2557{
2558 struct sp_node *n;
2559 struct rb_node *next;
2560
2561 if (!p->root.rb_node)
2562 return;
Mel Gorman42288fe2012-12-21 23:10:25 +00002563 spin_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 next = rb_first(&p->root);
2565 while (next) {
2566 n = rb_entry(next, struct sp_node, nd);
2567 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002568 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 }
Mel Gorman42288fe2012-12-21 23:10:25 +00002570 spin_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571}
2572
Mel Gorman1a687c22012-11-22 11:16:36 +00002573#ifdef CONFIG_NUMA_BALANCING
2574static bool __initdata numabalancing_override;
2575
2576static void __init check_numabalancing_enable(void)
2577{
2578 bool numabalancing_default = false;
2579
2580 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2581 numabalancing_default = true;
2582
2583 if (nr_node_ids > 1 && !numabalancing_override) {
2584 printk(KERN_INFO "Enabling automatic NUMA balancing. "
2585 "Configure with numa_balancing= or sysctl");
2586 set_numabalancing_state(numabalancing_default);
2587 }
2588}
2589
2590static int __init setup_numabalancing(char *str)
2591{
2592 int ret = 0;
2593 if (!str)
2594 goto out;
2595 numabalancing_override = true;
2596
2597 if (!strcmp(str, "enable")) {
2598 set_numabalancing_state(true);
2599 ret = 1;
2600 } else if (!strcmp(str, "disable")) {
2601 set_numabalancing_state(false);
2602 ret = 1;
2603 }
2604out:
2605 if (!ret)
2606 printk(KERN_WARNING "Unable to parse numa_balancing=\n");
2607
2608 return ret;
2609}
2610__setup("numa_balancing=", setup_numabalancing);
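/*
 * Usage note (illustrative): automatic NUMA balancing can be forced off or
 * on from the kernel command line, e.g.
 *
 *	numa_balancing=disable
 *
 * and, with CONFIG_NUMA_BALANCING built in, toggled at runtime through the
 * kernel.numa_balancing sysctl (e.g. "sysctl kernel.numa_balancing=1").
 */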
2611#else
2612static inline void __init check_numabalancing_enable(void)
2613{
2614}
2615#endif /* CONFIG_NUMA_BALANCING */
2616
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617/* assumes fs == KERNEL_DS */
2618void __init numa_policy_init(void)
2619{
Paul Mundtb71636e2007-07-15 23:38:15 -07002620 nodemask_t interleave_nodes;
2621 unsigned long largest = 0;
2622 int nid, prefer = 0;
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 policy_cache = kmem_cache_create("numa_policy",
2625 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002626 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627
2628 sn_cache = kmem_cache_create("shared_policy_node",
2629 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002630 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631
Mel Gorman5606e382012-11-02 18:19:13 +00002632 for_each_node(nid) {
2633 preferred_node_policy[nid] = (struct mempolicy) {
2634 .refcnt = ATOMIC_INIT(1),
2635 .mode = MPOL_PREFERRED,
2636 .flags = MPOL_F_MOF | MPOL_F_MORON,
2637 .v = { .preferred_node = nid, },
2638 };
2639 }
2640
Paul Mundtb71636e2007-07-15 23:38:15 -07002641 /*
2642 * Set interleaving policy for system init. Interleaving is only
2643 * enabled across suitably sized nodes (default is >= 16MB), or
2644 * fall back to the largest node if they're all smaller.
2645 */
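/*
 * Worked example (assuming 4 KiB pages): the 16MB cut-off below,
 * (total_pages << PAGE_SHIFT) >= (16 << 20), is met once a node has at
 * least (16 << 20) / PAGE_SIZE = 4096 present pages.
 */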
2646 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002647 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002648 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649
Paul Mundtb71636e2007-07-15 23:38:15 -07002650 /* Preserve the largest node */
2651 if (largest < total_pages) {
2652 largest = total_pages;
2653 prefer = nid;
2654 }
2655
2656 /* Interleave this node? */
2657 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2658 node_set(nid, interleave_nodes);
2659 }
2660
2661 /* All too small, use the largest */
2662 if (unlikely(nodes_empty(interleave_nodes)))
2663 node_set(prefer, interleave_nodes);
2664
David Rientjes028fec42008-04-28 02:12:25 -07002665 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 printk(KERN_ERR "numa_policy_init: interleaving failed\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002667
2668 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669}
2670
Christoph Lameter8bccd852005-10-29 18:16:59 -07002671/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672void numa_default_policy(void)
2673{
David Rientjes028fec42008-04-28 02:12:25 -07002674 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675}
Paul Jackson68860ec2005-10-30 15:02:36 -08002676
Paul Jackson42253992006-01-08 01:01:59 -08002677/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002678 * Parse and format mempolicy from/to strings
2679 */
2680
2681/*
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002682 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002683 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002684static const char * const policy_modes[] =
2685{
2686 [MPOL_DEFAULT] = "default",
2687 [MPOL_PREFERRED] = "prefer",
2688 [MPOL_BIND] = "bind",
2689 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002690 [MPOL_LOCAL] = "local",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002691};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002692
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002693
2694#ifdef CONFIG_TMPFS
2695/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002696 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002697 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002698 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002699 *
2700 * Format of input:
2701 * <mode>[=<flags>][:<nodelist>]
2702 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002703 * Returns 0 on success, 1 on failure; @mpol is only set on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002704 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002705int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002706{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002707 struct mempolicy *new = NULL;
Lee Schermerhornb4652e82010-05-24 14:32:03 -07002708 unsigned short mode;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002709 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002710 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002711 char *nodelist = strchr(str, ':');
2712 char *flags = strchr(str, '=');
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002713 int err = 1;
2714
2715 if (nodelist) {
2716 /* NUL-terminate mode or flags string */
2717 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002718 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002719 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002720 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002721 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002722 } else
2723 nodes_clear(nodes);
2724
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002725 if (flags)
2726 *flags++ = '\0'; /* terminate mode string */
2727
Peter Zijlstra479e2802012-10-25 14:16:28 +02002728 for (mode = 0; mode < MPOL_MAX; mode++) {
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002729 if (!strcmp(str, policy_modes[mode])) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002730 break;
2731 }
2732 }
Mel Gormana7200942012-11-16 09:37:58 +00002733 if (mode >= MPOL_MAX)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002734 goto out;
2735
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002736 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002737 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002738 /*
2739 * Insist on a nodelist of one node only
2740 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002741 if (nodelist) {
2742 char *rest = nodelist;
2743 while (isdigit(*rest))
2744 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002745 if (*rest)
2746 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002747 }
2748 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002749 case MPOL_INTERLEAVE:
2750 /*
2751 * Default to online nodes with memory if no nodelist
2752 */
2753 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002754 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002755 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002756 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002757 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002758 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002759 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002760 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002761 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002762 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002763 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002764 case MPOL_DEFAULT:
2765 /*
2766 * Insist on an empty nodelist
2767 */
2768 if (!nodelist)
2769 err = 0;
2770 goto out;
KOSAKI Motohirod69b2e62010-03-23 13:35:30 -07002771 case MPOL_BIND:
2772 /*
2773 * Insist on a nodelist
2774 */
2775 if (!nodelist)
2776 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002777 }
2778
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002779 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002780 if (flags) {
2781 /*
2782 * Currently, we only support two mutually exclusive
2783 * mode flags.
2784 */
2785 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002786 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002787 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002788 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002789 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002790 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002791 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002792
2793 new = mpol_new(mode, mode_flags, &nodes);
2794 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002795 goto out;
2796
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002797 /*
2798 * Save nodes for mpol_to_str() to show the tmpfs mount options
2799 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2800 */
2801 if (mode != MPOL_PREFERRED)
2802 new->v.nodes = nodes;
2803 else if (nodelist)
2804 new->v.preferred_node = first_node(nodes);
2805 else
2806 new->flags |= MPOL_F_LOCAL;
2807
2808 /*
2809 * Save nodes for contextualization: this will be used to "clone"
2810 * the mempolicy in a specific context [cpuset] at a later time.
2811 */
2812 new->w.user_nodemask = nodes;
2813
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002814 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002815
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002816out:
2817 /* Restore string for error message */
2818 if (nodelist)
2819 *--nodelist = ':';
2820 if (flags)
2821 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002822 if (!err)
2823 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002824 return err;
2825}
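/*
 * Illustrative usage, not part of this file: mpol_parse_str() backs the
 * tmpfs "mpol=" mount option, e.g.
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * is handled in mm/shmem.c's option parsing by code along the lines of:
 *
 *	char *value;			/* here "interleave:0-3" */
 *	struct mempolicy *mpol = NULL;
 *
 *	if (mpol_parse_str(value, &mpol))	/* non-zero == parse error */
 *		goto bad_val;
 *
 * leaving mpol pointing at an MPOL_INTERLEAVE policy over nodes 0-3.  Note
 * that the string is modified (and restored) in place, so it must be
 * writable.
 */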
2826#endif /* CONFIG_TMPFS */
2827
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002828/**
2829 * mpol_to_str - format a mempolicy structure for printing
2830 * @buffer: to contain formatted mempolicy string
2831 * @maxlen: length of @buffer
2832 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002833 *
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002834 * Convert a mempolicy into a string.
2835 * Returns the number of characters in buffer (if positive)
2836 * or an error (negative)
2837 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002838int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002839{
2840 char *p = buffer;
2841 int l;
2842 nodemask_t nodes;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002843 unsigned short mode;
David Rientjesf5b087b2008-04-28 02:12:27 -07002844 unsigned short flags = pol ? pol->flags : 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002845
Lee Schermerhorn22919902008-04-28 02:13:22 -07002846 /*
2847 * Sanity check: room for longest mode, flag and some nodes
2848 */
2849 VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2850
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002851 if (!pol || pol == &default_policy)
2852 mode = MPOL_DEFAULT;
2853 else
2854 mode = pol->mode;
2855
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002856 switch (mode) {
2857 case MPOL_DEFAULT:
2858 nodes_clear(nodes);
2859 break;
2860
2861 case MPOL_PREFERRED:
2862 nodes_clear(nodes);
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002863 if (flags & MPOL_F_LOCAL)
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002864 mode = MPOL_LOCAL;
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002865 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002866 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002867 break;
2868
2869 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07002870 /* Fall through */
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002871 case MPOL_INTERLEAVE:
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002872 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002873 break;
2874
2875 default:
Dave Jones80de7c32012-09-06 12:01:00 -04002876 return -EINVAL;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002877 }
2878
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002879 l = strlen(policy_modes[mode]);
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002880 if (buffer + maxlen < p + l + 1)
2881 return -ENOSPC;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002882
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002883 strcpy(p, policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002884 p += l;
2885
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002886 if (flags & MPOL_MODE_FLAGS) {
David Rientjesf5b087b2008-04-28 02:12:27 -07002887 if (buffer + maxlen < p + 2)
2888 return -ENOSPC;
2889 *p++ = '=';
2890
Lee Schermerhorn22919902008-04-28 02:13:22 -07002891 /*
2892 * Currently, the only defined flags are mutually exclusive
2893 */
David Rientjesf5b087b2008-04-28 02:12:27 -07002894 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07002895 p += snprintf(p, buffer + maxlen - p, "static");
2896 else if (flags & MPOL_F_RELATIVE_NODES)
2897 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07002898 }
2899
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002900 if (!nodes_empty(nodes)) {
2901 if (buffer + maxlen < p + 2)
2902 return -ENOSPC;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002903 *p++ = ':';
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002904 p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2905 }
2906 return p - buffer;
2907}
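/*
 * Illustrative usage, not part of this file: mpol_to_str() is what
 * /proc/<pid>/numa_maps and the tmpfs mount-option display rely on.
 * fs/proc/task_mmu.c does roughly:
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 *
 * producing numa_maps lines such as
 *
 *	7f4be0a00000 interleave=static:0-3 file=/mnt/data ...
 */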