/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting from the first node
 *                and proceeding to the last. It would be better if bind
 *                truly restricted the allocation to the specified memory
 *                nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
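
/*
 * Illustrative userspace sketch (not part of this file): the policies above
 * are normally requested through set_mempolicy(2) and mbind(2), here via the
 * wrappers declared in <numaif.h>.  The node numbers and the addr/length of
 * the mapping are assumptions for the example only.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Process policy: interleave new allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// VMA policy: bind an existing mapping to node 0 and migrate
 *	// already-faulted pages there.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0),
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */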

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task rebinds task->mempolicy in two steps.  The first
	 * step sets all the newly allowed nodes, and the second step clears
	 * all the disallowed nodes.  This way we avoid a window in which no
	 * node is available to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
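
/*
 * Worked example (illustrative, the nodemask values are assumptions): a task
 * using MPOL_INTERLEAVE over nodes {0,1} is moved by cpuset from mems {0,1}
 * to mems {2,3}.  With the two-step rebind, MPOL_REBIND_STEP1 remaps
 * {0,1} -> {2,3} and ORs it in, leaving pol->v.nodes = {0,1,2,3}, so a
 * concurrent lockless reader never sees an empty mask.  MPOL_REBIND_STEP2
 * then drops the disallowed nodes, leaving pol->v.nodes = {2,3}.  A caller
 * holding the appropriate lock can do both at once with MPOL_REBIND_ONCE.
 */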

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
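
/*
 * Example (illustrative values): with MPOL_F_RELATIVE_NODES the user's
 * nodemask is interpreted relative to the set of allowed nodes.  A user
 * mask of {0,2} folded onto allowed nodes {4,5,6} selects the 0th and 2nd
 * allowed nodes, i.e. the result is {4,6}.
 */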

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task rebinds task->mempolicy in two steps.  The first step
 * sets all the newly allowed nodes, and the second step clears all the
 * disallowed nodes.  This way we avoid a window in which no node is
 * available to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTail(page) && PageAnon(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (vma_migratable(vma) &&
			vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	if ((flags & MPOL_MF_STRICT) ||
	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
	     vma_migratable(vma)))
		/* queue pages from current vma */
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist, which
 * is passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
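
/*
 * Illustrative userspace sketch (not part of this file): combining
 * MPOL_F_NODE | MPOL_F_ADDR makes get_mempolicy(2) report which node backs
 * the page at a given address.  The variable names are assumptions for the
 * example only.
 *
 *	#include <numaif.h>
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */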

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
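
/*
 * Illustrative userspace sketch (not part of this file): do_migrate_pages()
 * is reached via the migrate_pages(2) syscall, e.g. to move a process's
 * pages from node 0 to node 1.  The pid and node numbers are assumptions
 * for the example only.
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *	long nr_not_moved = migrate_pages(pid, 8 * sizeof(unsigned long),
 *					  &old_nodes, &new_nodes);
 */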

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx  mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	err = queue_pages_range(mm, start, end, nmask,
				  flags | MPOL_MF_INVERT, &pagelist);
	if (!err)
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_movable_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}
1265
Christoph Lameter39743882006-01-08 01:00:51 -08001266/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001267 * User space interface with variable sized bitmaps for nodelists.
1268 */
1269
1270/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001271static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001272 unsigned long maxnode)
1273{
1274 unsigned long k;
1275 unsigned long nlongs;
1276 unsigned long endmask;
1277
1278 --maxnode;
1279 nodes_clear(*nodes);
1280 if (maxnode == 0 || !nmask)
1281 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001282 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001283 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001284
1285 nlongs = BITS_TO_LONGS(maxnode);
1286 if ((maxnode % BITS_PER_LONG) == 0)
1287 endmask = ~0UL;
1288 else
1289 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1290
 1291	/* When the user specified more nodes than supported, just check
 1292	   that the unsupported part is all zero. */
1293 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1294 if (nlongs > PAGE_SIZE/sizeof(long))
1295 return -EINVAL;
1296 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1297 unsigned long t;
1298 if (get_user(t, nmask + k))
1299 return -EFAULT;
1300 if (k == nlongs - 1) {
1301 if (t & endmask)
1302 return -EINVAL;
1303 } else if (t)
1304 return -EINVAL;
1305 }
1306 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1307 endmask = ~0UL;
1308 }
1309
1310 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1311 return -EFAULT;
1312 nodes_addr(*nodes)[nlongs-1] &= endmask;
1313 return 0;
1314}
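/*
 * Worked example (editor-added, for clarity): with BITS_PER_LONG == 64 and a
 * caller passing maxnode == 10, the decrement above leaves maxnode == 9, so
 * nlongs == BITS_TO_LONGS(9) == 1 and endmask == (1UL << 9) - 1 == 0x1ff.
 * Only bits 0..8 of the word copied from user space survive the final
 * "&= endmask", i.e. node ids 9 and above are silently masked off.
 */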
1315
1316/* Copy a kernel node mask to user space */
1317static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1318 nodemask_t *nodes)
1319{
1320 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1321 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1322
1323 if (copy > nbytes) {
1324 if (copy > PAGE_SIZE)
1325 return -EINVAL;
1326 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1327 return -EFAULT;
1328 copy = nbytes;
1329 }
1330 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1331}
1332
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001333SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
Rasmus Villemoesf7f28ca2014-06-04 16:07:57 -07001334 unsigned long, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001335 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001336{
1337 nodemask_t nodes;
1338 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001339 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001340
David Rientjes028fec42008-04-28 02:12:25 -07001341 mode_flags = mode & MPOL_MODE_FLAGS;
1342 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001343 if (mode >= MPOL_MAX)
1344 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001345 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1346 (mode_flags & MPOL_F_RELATIVE_NODES))
1347 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001348 err = get_nodes(&nodes, nmask, maxnode);
1349 if (err)
1350 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001351 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001352}
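/*
 * Illustrative userspace call (editor-added sketch, not part of the original
 * file): bind a fresh anonymous mapping to node 0 through the mbind(2)
 * wrapper declared in <numaif.h>.  The buffer size and flag choice are
 * arbitrary example values; nodemask has only bit 0 set, i.e. node 0.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;
 *	if (mbind(buf, len, MPOL_BIND, &nodemask,
 *		  sizeof(nodemask) * 8, MPOL_MF_STRICT | MPOL_MF_MOVE))
 *		perror("mbind");
 */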
1353
1354/* Set the process memory policy */
Rasmus Villemoes23c89022014-06-04 16:07:58 -07001355SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001356 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001357{
1358 int err;
1359 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001360 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001361
David Rientjes028fec42008-04-28 02:12:25 -07001362 flags = mode & MPOL_MODE_FLAGS;
1363 mode &= ~MPOL_MODE_FLAGS;
1364 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001365 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001366 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1367 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001368 err = get_nodes(&nodes, nmask, maxnode);
1369 if (err)
1370 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001371 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001372}
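/*
 * Illustrative userspace call (editor-added sketch): interleave all future
 * allocations of the calling task across nodes 0 and 1 via the
 * set_mempolicy(2) wrapper from <numaif.h>.
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */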
1373
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001374SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1375 const unsigned long __user *, old_nodes,
1376 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001377{
David Howellsc69e8d92008-11-14 10:39:19 +11001378 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001379 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001380 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001381 nodemask_t task_nodes;
1382 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001383 nodemask_t *old;
1384 nodemask_t *new;
1385 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001386
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001387 if (!scratch)
1388 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001389
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001390 old = &scratch->mask1;
1391 new = &scratch->mask2;
1392
1393 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001394 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001395 goto out;
1396
1397 err = get_nodes(new, new_nodes, maxnode);
1398 if (err)
1399 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001400
1401 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001402 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001403 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001404 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001405 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001406 err = -ESRCH;
1407 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001408 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001409 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001410
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001411 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001412
1413 /*
1414 * Check if this process has the right to modify the specified
1415 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001416 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001417 * userid as the target process.
1418 */
David Howellsc69e8d92008-11-14 10:39:19 +11001419 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001420 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1421 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001422 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001423 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001424 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001425 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001426 }
David Howellsc69e8d92008-11-14 10:39:19 +11001427 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001428
1429 task_nodes = cpuset_mems_allowed(task);
1430 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001431 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001432 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001433 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001434 }
1435
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08001436 if (!nodes_subset(*new, node_states[N_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001437 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001438 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001439 }
1440
David Quigley86c3a762006-06-23 02:04:02 -07001441 err = security_task_movememory(task);
1442 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001443 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001444
Christoph Lameter3268c632012-03-21 16:34:06 -07001445 mm = get_task_mm(task);
1446 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001447
1448 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001449 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001450 goto out;
1451 }
1452
1453 err = do_migrate_pages(mm, old, new,
1454 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001455
1456 mmput(mm);
1457out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001458 NODEMASK_SCRATCH_FREE(scratch);
1459
Christoph Lameter39743882006-01-08 01:00:51 -08001460 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001461
1462out_put:
1463 put_task_struct(task);
1464 goto out;
1465
Christoph Lameter39743882006-01-08 01:00:51 -08001466}
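/*
 * Illustrative userspace call (editor-added sketch; target_pid is a
 * placeholder): move another process' pages from node 0 to node 1 with the
 * migrate_pages(2) wrapper from <numaif.h>.  A negative return means error;
 * a positive return is the number of pages that could not be moved.
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *	long ret = migrate_pages(target_pid, sizeof(old_nodes) * 8,
 *				 &old_nodes, &new_nodes);
 */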
1467
1468
Christoph Lameter8bccd852005-10-29 18:16:59 -07001469/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001470SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1471 unsigned long __user *, nmask, unsigned long, maxnode,
1472 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001473{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001474 int err;
1475 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001476 nodemask_t nodes;
1477
1478 if (nmask != NULL && maxnode < MAX_NUMNODES)
1479 return -EINVAL;
1480
1481 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1482
1483 if (err)
1484 return err;
1485
1486 if (policy && put_user(pval, policy))
1487 return -EFAULT;
1488
1489 if (nmask)
1490 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1491
1492 return err;
1493}
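/*
 * Illustrative userspace call (editor-added sketch): query which policy mode
 * governs a particular mapped address with the get_mempolicy(2) wrapper from
 * <numaif.h>.  MPOL_F_ADDR selects the VMA policy for addr instead of the
 * task policy; passing a NULL nodemask sidesteps the maxnode size check.
 *
 *	int mode;
 *	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */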
1494
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495#ifdef CONFIG_COMPAT
1496
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001497COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1498 compat_ulong_t __user *, nmask,
1499 compat_ulong_t, maxnode,
1500 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501{
1502 long err;
1503 unsigned long __user *nm = NULL;
1504 unsigned long nr_bits, alloc_size;
1505 DECLARE_BITMAP(bm, MAX_NUMNODES);
1506
1507 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1508 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1509
1510 if (nmask)
1511 nm = compat_alloc_user_space(alloc_size);
1512
1513 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1514
1515 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001516 unsigned long copy_size;
1517 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1518 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 /* ensure entire bitmap is zeroed */
1520 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1521 err |= compat_put_bitmap(nmask, bm, nr_bits);
1522 }
1523
1524 return err;
1525}
1526
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001527COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1528 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529{
1530 long err = 0;
1531 unsigned long __user *nm = NULL;
1532 unsigned long nr_bits, alloc_size;
1533 DECLARE_BITMAP(bm, MAX_NUMNODES);
1534
1535 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1536 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1537
1538 if (nmask) {
1539 err = compat_get_bitmap(bm, nmask, nr_bits);
1540 nm = compat_alloc_user_space(alloc_size);
1541 err |= copy_to_user(nm, bm, alloc_size);
1542 }
1543
1544 if (err)
1545 return -EFAULT;
1546
1547 return sys_set_mempolicy(mode, nm, nr_bits+1);
1548}
1549
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001550COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1551 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1552 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553{
1554 long err = 0;
1555 unsigned long __user *nm = NULL;
1556 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001557 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
1559 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1560 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1561
1562 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001563 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001565 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 }
1567
1568 if (err)
1569 return -EFAULT;
1570
1571 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1572}
1573
1574#endif
1575
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001576struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1577 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578{
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001579 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580
1581 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001582 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001583 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001584 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001586
1587 /*
1588 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1589 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1590 * count on these policies which will be dropped by
1591 * mpol_cond_put() later
1592 */
1593 if (mpol_needs_cond_ref(pol))
1594 mpol_get(pol);
1595 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001597
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001598 return pol;
1599}
1600
1601/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001602 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001603 * @vma: virtual memory area whose policy is sought
1604 * @addr: address in @vma for shared policy lookup
1605 *
1606 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001607 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001608 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1609 * count--added by the get_policy() vm_op, as appropriate--to protect against
1610 * freeing by another task. It is the caller's responsibility to free the
1611 * extra reference for shared policies.
1612 */
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001613static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1614 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001615{
1616 struct mempolicy *pol = __get_vma_policy(vma, addr);
1617
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001618 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001619 pol = get_task_policy(current);
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001620
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 return pol;
1622}
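/*
 * Typical in-kernel caller pattern (editor-added sketch): any lookup that can
 * return a shared (MPOL_F_SHARED) policy must be paired with mpol_cond_put(),
 * which drops the extra reference only when one was taken.
 *
 *	struct mempolicy *pol = get_vma_policy(vma, addr);
 *	... consult pol->mode, policy_nodemask(gfp, pol), etc. ...
 *	mpol_cond_put(pol);
 */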
1623
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001624bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001625{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001626 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001627
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001628 if (vma->vm_ops && vma->vm_ops->get_policy) {
1629 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001630
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001631 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1632 if (pol && (pol->flags & MPOL_F_MOF))
1633 ret = true;
1634 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001635
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001636 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001637 }
1638
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001639 pol = vma->vm_policy;
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001640 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001641 pol = get_task_policy(current);
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001642
Mel Gormanfc3147242013-10-07 11:29:09 +01001643 return pol->flags & MPOL_F_MOF;
1644}
1645
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001646static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1647{
1648 enum zone_type dynamic_policy_zone = policy_zone;
1649
1650 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1651
1652 /*
 1653	 * If policy->v.nodes has movable memory only,
 1654	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
 1655	 *
 1656	 * policy->v.nodes is intersected with node_states[N_MEMORY],
 1657	 * so if the following test fails, it implies that
 1658	 * policy->v.nodes has movable memory only.
1659 */
1660 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1661 dynamic_policy_zone = ZONE_MOVABLE;
1662
1663 return zone >= dynamic_policy_zone;
1664}
1665
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001666/*
1667 * Return a nodemask representing a mempolicy for filtering nodes for
1668 * page allocation
1669 */
1670static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001671{
1672 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001673 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001674 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001675 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1676 return &policy->v.nodes;
1677
1678 return NULL;
1679}
1680
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001681/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001682static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1683 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001685 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001687 if (!(policy->flags & MPOL_F_LOCAL))
1688 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689 break;
1690 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001691 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001692 * Normally, MPOL_BIND allocations are node-local within the
1693 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001694 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001695 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001696 */
Mel Gorman19770b32008-04-28 02:12:18 -07001697 if (unlikely(gfp & __GFP_THISNODE) &&
1698 unlikely(!node_isset(nd, policy->v.nodes)))
1699 nd = first_node(policy->v.nodes);
1700 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702 BUG();
1703 }
Mel Gorman0e884602008-04-28 02:12:14 -07001704 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705}
1706
1707/* Do dynamic interleaving for a process */
1708static unsigned interleave_nodes(struct mempolicy *policy)
1709{
1710 unsigned nid, next;
1711 struct task_struct *me = current;
1712
1713 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001714 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001716 next = first_node(policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001717 if (next < MAX_NUMNODES)
1718 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 return nid;
1720}
1721
Christoph Lameterdc85da12006-01-18 17:42:36 -08001722/*
1723 * Depending on the memory policy provide a node from which to allocate the
1724 * next slab entry.
1725 */
David Rientjes2a389612014-04-07 15:37:29 -07001726unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001727{
Andi Kleene7b691b2012-06-09 02:40:03 -07001728 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001729 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001730
1731 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001732 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001733
1734 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001735 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001736 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001737
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001738 switch (policy->mode) {
1739 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001740 /*
1741 * handled MPOL_F_LOCAL above
1742 */
1743 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001744
Christoph Lameterdc85da12006-01-18 17:42:36 -08001745 case MPOL_INTERLEAVE:
1746 return interleave_nodes(policy);
1747
Mel Gormandd1a2392008-04-28 02:12:17 -07001748 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001749 /*
1750 * Follow bind policy behavior and start allocation at the
1751 * first node.
1752 */
Mel Gorman19770b32008-04-28 02:12:18 -07001753 struct zonelist *zonelist;
1754 struct zone *zone;
1755 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
David Rientjes2a389612014-04-07 15:37:29 -07001756 zonelist = &NODE_DATA(node)->node_zonelists[0];
Mel Gorman19770b32008-04-28 02:12:18 -07001757 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1758 &policy->v.nodes,
1759 &zone);
David Rientjes2a389612014-04-07 15:37:29 -07001760 return zone ? zone->node : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001761 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001762
Christoph Lameterdc85da12006-01-18 17:42:36 -08001763 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001764 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001765 }
1766}
1767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768/* Do static interleaving for a VMA with known offset. */
1769static unsigned offset_il_node(struct mempolicy *pol,
1770 struct vm_area_struct *vma, unsigned long off)
1771{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001772 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001773 unsigned target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 int c;
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001775 int nid = NUMA_NO_NODE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
David Rientjesf5b087b2008-04-28 02:12:27 -07001777 if (!nnodes)
1778 return numa_node_id();
1779 target = (unsigned int)off % nnodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 c = 0;
1781 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001782 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 c++;
1784 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 return nid;
1786}
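/*
 * Worked example (editor-added, for clarity): with pol->v.nodes = {0,2,4},
 * nnodes is 3.  For off == 7, target == 7 % 3 == 1; the do/while loop steps
 * past the first set node (c == 1) and stops on the second (c == 2), so node
 * 2 is returned.  Because the result depends only on the offset, the same
 * page offset always interleaves to the same node.
 */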
1787
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001788/* Determine a node number for interleave */
1789static inline unsigned interleave_nid(struct mempolicy *pol,
1790 struct vm_area_struct *vma, unsigned long addr, int shift)
1791{
1792 if (vma) {
1793 unsigned long off;
1794
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001795 /*
1796 * for small pages, there is no difference between
1797 * shift and PAGE_SHIFT, so the bit-shift is safe.
1798 * for huge pages, since vm_pgoff is in units of small
1799 * pages, we need to shift off the always 0 bits to get
1800 * a useful offset.
1801 */
1802 BUG_ON(shift < PAGE_SHIFT);
1803 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001804 off += (addr - vma->vm_start) >> shift;
1805 return offset_il_node(pol, vma, off);
1806 } else
1807 return interleave_nodes(pol);
1808}
1809
Michal Hocko778d3b02011-07-26 16:08:30 -07001810/*
1811 * Return the bit number of a random bit set in the nodemask.
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001812 * (returns NUMA_NO_NODE if nodemask is empty)
Michal Hocko778d3b02011-07-26 16:08:30 -07001813 */
1814int node_random(const nodemask_t *maskp)
1815{
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001816 int w, bit = NUMA_NO_NODE;
Michal Hocko778d3b02011-07-26 16:08:30 -07001817
1818 w = nodes_weight(*maskp);
1819 if (w)
1820 bit = bitmap_ord_to_pos(maskp->bits,
1821 get_random_int() % w, MAX_NUMNODES);
1822 return bit;
1823}
1824
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001825#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001826/*
1827 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001828 * @vma: virtual memory area whose policy is sought
1829 * @addr: address in @vma for shared policy lookup and interleave policy
1830 * @gfp_flags: for requested zone
1831 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1832 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001833 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001834 * Returns a zonelist suitable for a huge page allocation and a pointer
1835 * to the struct mempolicy for conditional unref after allocation.
 1836 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1837 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001838 *
Mel Gormand26914d2014-04-03 14:47:24 -07001839 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001840 */
Mel Gorman396faf02007-07-17 04:03:13 -07001841struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001842 gfp_t gfp_flags, struct mempolicy **mpol,
1843 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001844{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001845 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001846
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001847 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001848 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001849
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001850 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1851 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001852 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001853 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001854 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001855 if ((*mpol)->mode == MPOL_BIND)
1856 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001857 }
1858 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001859}
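/*
 * Illustrative caller shape (editor-added sketch; the hugetlb-side details
 * are assumptions): the reference returned in *mpol must be dropped with
 * mpol_cond_put() once the allocation attempt is over, and the whole lookup
 * runs under the cpuset seqcount.
 *
 *	cookie = read_mems_allowed_begin();
 *	zl = huge_zonelist(vma, addr, gfp_mask, &mpol, &nodemask);
 *	page = ...dequeue a huge page from zl, restricted to nodemask...;
 *	mpol_cond_put(mpol);
 *	if (!page && read_mems_allowed_retry(cookie))
 *		...retry the lookup...;
 */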
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001860
1861/*
1862 * init_nodemask_of_mempolicy
1863 *
1864 * If the current task's mempolicy is "default" [NULL], return 'false'
1865 * to indicate default policy. Otherwise, extract the policy nodemask
1866 * for 'bind' or 'interleave' policy into the argument nodemask, or
1867 * initialize the argument nodemask to contain the single node for
1868 * 'preferred' or 'local' policy and return 'true' to indicate presence
1869 * of non-default mempolicy.
1870 *
1871 * We don't bother with reference counting the mempolicy [mpol_get/put]
 1872 * because the current task is examining its own mempolicy and a task's
1873 * mempolicy is only ever changed by the task itself.
1874 *
1875 * N.B., it is the caller's responsibility to free a returned nodemask.
1876 */
1877bool init_nodemask_of_mempolicy(nodemask_t *mask)
1878{
1879 struct mempolicy *mempolicy;
1880 int nid;
1881
1882 if (!(mask && current->mempolicy))
1883 return false;
1884
Miao Xiec0ff7452010-05-24 14:32:08 -07001885 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001886 mempolicy = current->mempolicy;
1887 switch (mempolicy->mode) {
1888 case MPOL_PREFERRED:
1889 if (mempolicy->flags & MPOL_F_LOCAL)
1890 nid = numa_node_id();
1891 else
1892 nid = mempolicy->v.preferred_node;
1893 init_nodemask_of_node(mask, nid);
1894 break;
1895
1896 case MPOL_BIND:
1897 /* Fall through */
1898 case MPOL_INTERLEAVE:
1899 *mask = mempolicy->v.nodes;
1900 break;
1901
1902 default:
1903 BUG();
1904 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001905 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001906
1907 return true;
1908}
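/*
 * Illustrative use (editor-added sketch; the surrounding helpers are
 * assumptions from the hugetlb side): callers that want to honour the task
 * policy when spreading persistent huge pages do roughly
 *
 *	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL | __GFP_NORETRY);
 *	if (mask && init_nodemask_of_mempolicy(mask))
 *		...allocate huge pages round-robin over *mask...;
 *	NODEMASK_FREE(mask);
 */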
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001909#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001910
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001911/*
1912 * mempolicy_nodemask_intersects
1913 *
1914 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1915 * policy. Otherwise, check for intersection between mask and the policy
 1916 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1917 * policy, always return true since it may allocate elsewhere on fallback.
1918 *
1919 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1920 */
1921bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1922 const nodemask_t *mask)
1923{
1924 struct mempolicy *mempolicy;
1925 bool ret = true;
1926
1927 if (!mask)
1928 return ret;
1929 task_lock(tsk);
1930 mempolicy = tsk->mempolicy;
1931 if (!mempolicy)
1932 goto out;
1933
1934 switch (mempolicy->mode) {
1935 case MPOL_PREFERRED:
1936 /*
 1937		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
 1938		 * allocate from; they may fall back to other nodes under OOM.
1939 * Thus, it's possible for tsk to have allocated memory from
1940 * nodes in mask.
1941 */
1942 break;
1943 case MPOL_BIND:
1944 case MPOL_INTERLEAVE:
1945 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1946 break;
1947 default:
1948 BUG();
1949 }
1950out:
1951 task_unlock(tsk);
1952 return ret;
1953}
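/*
 * Illustrative use (editor-added sketch; the caller-side naming is an
 * assumption): the OOM killer consults this when an allocation is
 * constrained by a mempolicy, so it can skip tasks that could never have
 * allocated from the nodes under pressure, roughly
 *
 *	if (!mempolicy_nodemask_intersects(task, constrained_nodemask))
 *		continue;	(task cannot hold memory on these nodes)
 */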
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955/* Allocate a page under the interleave policy.
 1956 Separate path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001957static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1958 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959{
1960 struct zonelist *zl;
1961 struct page *page;
1962
Mel Gorman0e884602008-04-28 02:12:14 -07001963 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001965 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001966 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 return page;
1968}
1969
1970/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001971 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 *
1973 * @gfp:
1974 * %GFP_USER user allocation.
1975 * %GFP_KERNEL kernel allocations,
1976 * %GFP_HIGHMEM highmem/user allocations,
1977 * %GFP_FS allocation should not call back into a file system.
1978 * %GFP_ATOMIC don't sleep.
1979 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001980 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 * @vma: Pointer to VMA or NULL if not available.
1982 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001983 * @node: Which node to prefer for allocation (modulo policy).
1984 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 *
1986 * This function allocates a page from the kernel page pool and applies
1987 * a NUMA policy associated with the VMA or the current process.
1988 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1989 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001990 * all allocations for pages that will be mapped into user space. Returns
1991 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 */
1993struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001994alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001995 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996{
Mel Gormancc9a6c82012-03-21 16:34:11 -07001997 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07001998 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07001999 unsigned int cpuset_mems_cookie;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002000 struct zonelist *zl;
2001 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
Mel Gormancc9a6c82012-03-21 16:34:11 -07002003retry_cpuset:
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002004 pol = get_vma_policy(vma, addr);
Mel Gormand26914d2014-04-03 14:47:24 -07002005 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002006
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002007 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002009
Andi Kleen8eac5632011-02-25 14:44:28 -08002010 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002011 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002012 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002013 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002015
Vlastimil Babka0867a572015-06-24 16:58:48 -07002016 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2017 int hpage_node = node;
2018
2019 /*
2020 * For hugepage allocation and non-interleave policy which
2021 * allows the current node (or other explicitly preferred
2022 * node) we only try to allocate from the current/preferred
2023 * node and don't fall back to other nodes, as the cost of
2024 * remote accesses would likely offset THP benefits.
2025 *
2026 * If the policy is interleave, or does not allow the current
2027 * node in its nodemask, we allocate the standard way.
2028 */
2029 if (pol->mode == MPOL_PREFERRED &&
2030 !(pol->flags & MPOL_F_LOCAL))
2031 hpage_node = pol->v.preferred_node;
2032
2033 nmask = policy_nodemask(gfp, pol);
2034 if (!nmask || node_isset(hpage_node, *nmask)) {
2035 mpol_cond_put(pol);
Vlastimil Babka96db8002015-09-08 15:03:50 -07002036 page = __alloc_pages_node(hpage_node,
Vlastimil Babka0867a572015-06-24 16:58:48 -07002037 gfp | __GFP_THISNODE, order);
2038 goto out;
2039 }
2040 }
2041
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002042 nmask = policy_nodemask(gfp, pol);
2043 zl = policy_zonelist(gfp, pol, node);
Oleg Nesterov23867402014-10-09 15:27:41 -07002044 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002045 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2046out:
Mel Gormand26914d2014-04-03 14:47:24 -07002047 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002048 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002049 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050}
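/*
 * Illustrative caller (editor-added sketch; the wrapper name and GFP mask
 * are stated as assumptions): most users reach this through the
 * alloc_page_vma() wrapper, which passes order 0, the local node and
 * hugepage == false, e.g. when populating an anonymous page on fault:
 *
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 */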
2051
2052/**
2053 * alloc_pages_current - Allocate pages.
2054 *
2055 * @gfp:
2056 * %GFP_USER user allocation,
2057 * %GFP_KERNEL kernel allocation,
2058 * %GFP_HIGHMEM highmem allocation,
2059 * %GFP_FS don't call back into a file system.
2060 * %GFP_ATOMIC don't sleep.
2061 * @order: Power of two of allocation size in pages. 0 is a single page.
2062 *
 2063 * Allocate a page from the kernel page pool. When not in
 2064 * interrupt context, apply the current process' NUMA policy.
2065 * Returns NULL when no page can be allocated.
2066 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002067 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 * 1) it's ok to take cpuset_sem (can WAIT), and
2069 * 2) allocating for current task (not interrupt).
2070 */
Al Virodd0fc662005-10-07 07:46:04 +01002071struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072{
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07002073 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002074 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002075 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07002077 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2078 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002079
Mel Gormancc9a6c82012-03-21 16:34:11 -07002080retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07002081 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002082
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002083 /*
2084 * No reference counting needed for current->mempolicy
2085 * nor system default_policy
2086 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002087 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002088 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2089 else
2090 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002091 policy_zonelist(gfp, pol, numa_node_id()),
2092 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002093
Mel Gormand26914d2014-04-03 14:47:24 -07002094 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002095 goto retry_cpuset;
2096
Miao Xiec0ff7452010-05-24 14:32:08 -07002097 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098}
2099EXPORT_SYMBOL(alloc_pages_current);
2100
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002101int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2102{
2103 struct mempolicy *pol = mpol_dup(vma_policy(src));
2104
2105 if (IS_ERR(pol))
2106 return PTR_ERR(pol);
2107 dst->vm_policy = pol;
2108 return 0;
2109}
2110
Paul Jackson42253992006-01-08 01:01:59 -08002111/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002112 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002113 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2114 * with the mems_allowed returned by cpuset_mems_allowed(). This
2115 * keeps mempolicies cpuset relative after its cpuset moves. See
2116 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002117 *
 2118 * current's mempolicy may be rebound by another task (the task that changes
 2119 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002120 */
Paul Jackson42253992006-01-08 01:01:59 -08002121
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002122/* Slow path of a mempolicy duplicate */
2123struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124{
2125 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2126
2127 if (!new)
2128 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002129
2130 /* task's mempolicy is protected by alloc_lock */
2131 if (old == current->mempolicy) {
2132 task_lock(current);
2133 *new = *old;
2134 task_unlock(current);
2135 } else
2136 *new = *old;
2137
Paul Jackson42253992006-01-08 01:01:59 -08002138 if (current_cpuset_is_being_rebound()) {
2139 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002140 if (new->flags & MPOL_F_REBINDING)
2141 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2142 else
2143 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 return new;
2147}
2148
2149/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002150bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151{
2152 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002153 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002154 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002155 return false;
Bob Liu19800502010-05-24 14:32:01 -07002156 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002157 return false;
Bob Liu19800502010-05-24 14:32:01 -07002158 if (mpol_store_user_nodemask(a))
2159 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002160 return false;
Bob Liu19800502010-05-24 14:32:01 -07002161
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002162 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002163 case MPOL_BIND:
2164 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002166 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002168 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 default:
2170 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002171 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 }
2173}
2174
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 * Shared memory backing store policy support.
2177 *
2178 * Remember policies even when nobody has shared memory mapped.
 2179 * The policies are kept in a red-black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002180 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 * for any accesses to the tree.
2182 */
2183
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002184/*
2185 * lookup first element intersecting start-end. Caller holds sp->lock for
2186 * reading or for writing
2187 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188static struct sp_node *
2189sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2190{
2191 struct rb_node *n = sp->root.rb_node;
2192
2193 while (n) {
2194 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2195
2196 if (start >= p->end)
2197 n = n->rb_right;
2198 else if (end <= p->start)
2199 n = n->rb_left;
2200 else
2201 break;
2202 }
2203 if (!n)
2204 return NULL;
2205 for (;;) {
2206 struct sp_node *w = NULL;
2207 struct rb_node *prev = rb_prev(n);
2208 if (!prev)
2209 break;
2210 w = rb_entry(prev, struct sp_node, nd);
2211 if (w->end <= start)
2212 break;
2213 n = prev;
2214 }
2215 return rb_entry(n, struct sp_node, nd);
2216}
2217
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002218/*
2219 * Insert a new shared policy into the list. Caller holds sp->lock for
2220 * writing.
2221 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2223{
2224 struct rb_node **p = &sp->root.rb_node;
2225 struct rb_node *parent = NULL;
2226 struct sp_node *nd;
2227
2228 while (*p) {
2229 parent = *p;
2230 nd = rb_entry(parent, struct sp_node, nd);
2231 if (new->start < nd->start)
2232 p = &(*p)->rb_left;
2233 else if (new->end > nd->end)
2234 p = &(*p)->rb_right;
2235 else
2236 BUG();
2237 }
2238 rb_link_node(&new->nd, parent, p);
2239 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002240 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002241 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242}
2243
2244/* Find shared policy intersecting idx */
2245struct mempolicy *
2246mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2247{
2248 struct mempolicy *pol = NULL;
2249 struct sp_node *sn;
2250
2251 if (!sp->root.rb_node)
2252 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002253 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 sn = sp_lookup(sp, idx, idx+1);
2255 if (sn) {
2256 mpol_get(sn->policy);
2257 pol = sn->policy;
2258 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002259 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 return pol;
2261}
2262
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002263static void sp_free(struct sp_node *n)
2264{
2265 mpol_put(n->policy);
2266 kmem_cache_free(sn_cache, n);
2267}
2268
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002269/**
2270 * mpol_misplaced - check whether current page node is valid in policy
2271 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002272 * @page: page to be checked
2273 * @vma: vm area where page mapped
2274 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002275 *
2276 * Lookup current policy node id for vma,addr and "compare to" page's
2277 * node id.
2278 *
2279 * Returns:
2280 * -1 - not misplaced, page is in the right node
2281 * node - node id where the page should be
2282 *
2283 * Policy determination "mimics" alloc_page_vma().
2284 * Called from fault path where we know the vma and faulting address.
2285 */
2286int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2287{
2288 struct mempolicy *pol;
2289 struct zone *zone;
2290 int curnid = page_to_nid(page);
2291 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002292 int thiscpu = raw_smp_processor_id();
2293 int thisnid = cpu_to_node(thiscpu);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002294 int polnid = -1;
2295 int ret = -1;
2296
2297 BUG_ON(!vma);
2298
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002299 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002300 if (!(pol->flags & MPOL_F_MOF))
2301 goto out;
2302
2303 switch (pol->mode) {
2304 case MPOL_INTERLEAVE:
2305 BUG_ON(addr >= vma->vm_end);
2306 BUG_ON(addr < vma->vm_start);
2307
2308 pgoff = vma->vm_pgoff;
2309 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2310 polnid = offset_il_node(pol, vma, pgoff);
2311 break;
2312
2313 case MPOL_PREFERRED:
2314 if (pol->flags & MPOL_F_LOCAL)
2315 polnid = numa_node_id();
2316 else
2317 polnid = pol->v.preferred_node;
2318 break;
2319
2320 case MPOL_BIND:
2321 /*
 2322		 * MPOL_BIND allows binding to multiple nodes.
 2323		 * Use the current page's node if it is in the policy nodemask,
 2324		 * else select the nearest allowed node, if any.
 2325		 * If there are no allowed nodes, use the current node [!misplaced].
2326 */
2327 if (node_isset(curnid, pol->v.nodes))
2328 goto out;
2329 (void)first_zones_zonelist(
2330 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2331 gfp_zone(GFP_HIGHUSER),
2332 &pol->v.nodes, &zone);
2333 polnid = zone->node;
2334 break;
2335
2336 default:
2337 BUG();
2338 }
Mel Gorman5606e382012-11-02 18:19:13 +00002339
2340 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002341 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002342 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002343
Rik van Riel10f39042014-01-27 17:03:44 -05002344 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002345 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002346 }
2347
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002348 if (curnid != polnid)
2349 ret = polnid;
2350out:
2351 mpol_cond_put(pol);
2352
2353 return ret;
2354}
2355
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2357{
Paul Mundt140d5a42007-07-15 23:38:16 -07002358	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002360 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361}
2362
Mel Gorman42288fe2012-12-21 23:10:25 +00002363static void sp_node_init(struct sp_node *node, unsigned long start,
2364 unsigned long end, struct mempolicy *pol)
2365{
2366 node->start = start;
2367 node->end = end;
2368 node->policy = pol;
2369}
2370
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002371static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2372 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002374 struct sp_node *n;
2375 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002377 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 if (!n)
2379 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002380
2381 newpol = mpol_dup(pol);
2382 if (IS_ERR(newpol)) {
2383 kmem_cache_free(sn_cache, n);
2384 return NULL;
2385 }
2386 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002387 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002388
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389 return n;
2390}
2391
2392/* Replace a policy range. */
2393static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2394 unsigned long end, struct sp_node *new)
2395{
Mel Gormanb22d1272012-10-08 16:29:17 -07002396 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002397 struct sp_node *n_new = NULL;
2398 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002399 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
Mel Gorman42288fe2012-12-21 23:10:25 +00002401restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002402 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 n = sp_lookup(sp, start, end);
2404 /* Take care of old policies in the same range. */
2405 while (n && n->start < end) {
2406 struct rb_node *next = rb_next(&n->nd);
2407 if (n->start >= start) {
2408 if (n->end <= end)
2409 sp_delete(sp, n);
2410 else
2411 n->start = end;
2412 } else {
2413 /* Old policy spanning whole new range. */
2414 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002415 if (!n_new)
2416 goto alloc_new;
2417
2418 *mpol_new = *n->policy;
2419 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002420 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002422 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002423 n_new = NULL;
2424 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 break;
2426 } else
2427 n->end = start;
2428 }
2429 if (!next)
2430 break;
2431 n = rb_entry(next, struct sp_node, nd);
2432 }
2433 if (new)
2434 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002435 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002436 ret = 0;
2437
2438err_out:
2439 if (mpol_new)
2440 mpol_put(mpol_new);
2441 if (n_new)
2442 kmem_cache_free(sn_cache, n_new);
2443
Mel Gormanb22d1272012-10-08 16:29:17 -07002444 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002445
2446alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002447 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002448 ret = -ENOMEM;
2449 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2450 if (!n_new)
2451 goto err_out;
2452 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2453 if (!mpol_new)
2454 goto err_out;
2455 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456}
2457
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002458/**
2459 * mpol_shared_policy_init - initialize shared policy for inode
2460 * @sp: pointer to inode shared policy
2461 * @mpol: struct mempolicy to install
2462 *
2463 * Install non-NULL @mpol in inode's shared policy rb-tree.
2464 * On entry, the current task has a reference on a non-NULL @mpol.
2465 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002466 * This is called at get_inode() calls and we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002467 */
2468void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
Robin Holt7339ff82006-01-14 13:20:48 -08002469{
Miao Xie58568d22009-06-16 15:31:49 -07002470 int ret;
2471
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002472 sp->root = RB_ROOT; /* empty tree == default mempolicy */
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002473 rwlock_init(&sp->lock);
Robin Holt7339ff82006-01-14 13:20:48 -08002474
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002475 if (mpol) {
2476 struct vm_area_struct pvma;
2477 struct mempolicy *new;
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002478 NODEMASK_SCRATCH(scratch);
Robin Holt7339ff82006-01-14 13:20:48 -08002479
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002480 if (!scratch)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002481 goto put_mpol;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002482 /* contextualize the tmpfs mount point mempolicy */
2483 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002484 if (IS_ERR(new))
Dan Carpenter0cae3452010-05-25 23:42:58 -07002485 goto free_scratch; /* no valid nodemask intersection */
Miao Xie58568d22009-06-16 15:31:49 -07002486
2487 task_lock(current);
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002488 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
Miao Xie58568d22009-06-16 15:31:49 -07002489 task_unlock(current);
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002490 if (ret)
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002491 goto put_new;
Robin Holt7339ff82006-01-14 13:20:48 -08002492
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002493 /* Create pseudo-vma that contains just the policy */
2494 memset(&pvma, 0, sizeof(struct vm_area_struct));
2495 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2496 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
Lee Schermerhorn15d77832010-05-24 14:32:04 -07002497
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002498put_new:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002499 mpol_put(new); /* drop initial ref */
Dan Carpenter0cae3452010-05-25 23:42:58 -07002500free_scratch:
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002501 NODEMASK_SCRATCH_FREE(scratch);
Lee Schermerhorn5c0c1652010-06-29 15:05:30 -07002502put_mpol:
2503 mpol_put(mpol); /* drop our incoming ref on sb mpol */
Robin Holt7339ff82006-01-14 13:20:48 -08002504 }
2505}
2506
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507int mpol_set_shared_policy(struct shared_policy *info,
2508 struct vm_area_struct *vma, struct mempolicy *npol)
2509{
2510 int err;
2511 struct sp_node *new = NULL;
2512 unsigned long sz = vma_pages(vma);
2513
David Rientjes028fec42008-04-28 02:12:25 -07002514 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 vma->vm_pgoff,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002516 sz, npol ? npol->mode : -1,
David Rientjes028fec42008-04-28 02:12:25 -07002517 npol ? npol->flags : -1,
David Rientjes00ef2d22013-02-22 16:35:36 -08002518 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
2520 if (npol) {
2521 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2522 if (!new)
2523 return -ENOMEM;
2524 }
2525 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2526 if (err && new)
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002527 sp_free(new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 return err;
2529}
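/*
 * Sketch of use (illustrative, hypothetical caller state): the pseudo-vma's
 * vm_pgoff and size select the range of the backing object to cover, and
 * passing npol == NULL clears whatever policy was stored for that range:
 *
 *	err = mpol_set_shared_policy(&info->policy, &pvma, NULL);
 */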
2530
2531/* Free a backing policy store on inode delete. */
2532void mpol_free_shared_policy(struct shared_policy *p)
2533{
2534 struct sp_node *n;
2535 struct rb_node *next;
2536
2537 if (!p->root.rb_node)
2538 return;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002539 write_lock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 next = rb_first(&p->root);
2541 while (next) {
2542 n = rb_entry(next, struct sp_node, nd);
2543 next = rb_next(&n->nd);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002544 sp_delete(p, n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002546 write_unlock(&p->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547}
2548
Mel Gorman1a687c22012-11-22 11:16:36 +00002549#ifdef CONFIG_NUMA_BALANCING
Mel Gormanc2976632014-01-29 14:05:42 -08002550static int __initdata numabalancing_override;
Mel Gorman1a687c22012-11-22 11:16:36 +00002551
2552static void __init check_numabalancing_enable(void)
2553{
2554 bool numabalancing_default = false;
2555
2556 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2557 numabalancing_default = true;
2558
Mel Gormanc2976632014-01-29 14:05:42 -08002559 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2560 if (numabalancing_override)
2561 set_numabalancing_state(numabalancing_override == 1);
2562
Mel Gormanb0dc2b92015-05-14 15:17:09 -07002563 if (num_online_nodes() > 1 && !numabalancing_override) {
Andrew Morton4a404be2014-01-29 14:05:43 -08002564 pr_info("%s automatic NUMA balancing. "
Mel Gormanc2976632014-01-29 14:05:42 -08002565 "Configure with numa_balancing= or the "
2566 "kernel.numa_balancing sysctl\n",
2567 numabalancing_default ? "Enabling" : "Disabling");
Mel Gorman1a687c22012-11-22 11:16:36 +00002568 set_numabalancing_state(numabalancing_default);
2569 }
2570}
2571
2572static int __init setup_numabalancing(char *str)
2573{
2574 int ret = 0;
2575 if (!str)
2576 goto out;
Mel Gorman1a687c22012-11-22 11:16:36 +00002577
2578 if (!strcmp(str, "enable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002579 numabalancing_override = 1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002580 ret = 1;
2581 } else if (!strcmp(str, "disable")) {
Mel Gormanc2976632014-01-29 14:05:42 -08002582 numabalancing_override = -1;
Mel Gorman1a687c22012-11-22 11:16:36 +00002583 ret = 1;
2584 }
2585out:
2586 if (!ret)
Andrew Morton4a404be2014-01-29 14:05:43 -08002587 pr_warn("Unable to parse numa_balancing=\n");
Mel Gorman1a687c22012-11-22 11:16:36 +00002588
2589 return ret;
2590}
2591__setup("numa_balancing=", setup_numabalancing);
2592#else
2593static inline void __init check_numabalancing_enable(void)
2594{
2595}
2596#endif /* CONFIG_NUMA_BALANCING */
2597
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598/* assumes fs == KERNEL_DS */
2599void __init numa_policy_init(void)
2600{
Paul Mundtb71636e2007-07-15 23:38:15 -07002601 nodemask_t interleave_nodes;
2602 unsigned long largest = 0;
2603 int nid, prefer = 0;
2604
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 policy_cache = kmem_cache_create("numa_policy",
2606 sizeof(struct mempolicy),
Paul Mundt20c2df82007-07-20 10:11:58 +09002607 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608
2609 sn_cache = kmem_cache_create("shared_policy_node",
2610 sizeof(struct sp_node),
Paul Mundt20c2df82007-07-20 10:11:58 +09002611 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612
Mel Gorman5606e382012-11-02 18:19:13 +00002613 for_each_node(nid) {
2614 preferred_node_policy[nid] = (struct mempolicy) {
2615 .refcnt = ATOMIC_INIT(1),
2616 .mode = MPOL_PREFERRED,
2617 .flags = MPOL_F_MOF | MPOL_F_MORON,
2618 .v = { .preferred_node = nid, },
2619 };
2620 }
2621
Paul Mundtb71636e2007-07-15 23:38:15 -07002622 /*
2623 * Set interleaving policy for system init. Interleaving is only
2624 * enabled across suitably sized nodes (default is >= 16MB), or
2625 * fall back to the largest node if they're all smaller.
2626 */
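	/*
	 * Worked example (illustrative): with 4 KiB pages the 16MB cut-off
	 * below corresponds to 4096 present pages, since
	 * (4096 << PAGE_SHIFT) == 16 << 20.
	 */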
2627 nodes_clear(interleave_nodes);
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002628 for_each_node_state(nid, N_MEMORY) {
Paul Mundtb71636e2007-07-15 23:38:15 -07002629 unsigned long total_pages = node_present_pages(nid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630
Paul Mundtb71636e2007-07-15 23:38:15 -07002631 /* Preserve the largest node */
2632 if (largest < total_pages) {
2633 largest = total_pages;
2634 prefer = nid;
2635 }
2636
2637 /* Interleave this node? */
2638 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2639 node_set(nid, interleave_nodes);
2640 }
2641
2642 /* All too small, use the largest */
2643 if (unlikely(nodes_empty(interleave_nodes)))
2644 node_set(prefer, interleave_nodes);
2645
David Rientjes028fec42008-04-28 02:12:25 -07002646 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
Mitchel Humpherysb1de0d12014-06-06 14:38:30 -07002647 pr_err("%s: interleaving failed\n", __func__);
Mel Gorman1a687c22012-11-22 11:16:36 +00002648
2649 check_numabalancing_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650}
2651
Christoph Lameter8bccd852005-10-29 18:16:59 -07002652/* Reset policy of current process to default */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653void numa_default_policy(void)
2654{
David Rientjes028fec42008-04-28 02:12:25 -07002655 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656}
Paul Jackson68860ec2005-10-30 15:02:36 -08002657
Paul Jackson42253992006-01-08 01:01:59 -08002658/*
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002659 * Parse and format mempolicy from/to strings
2660 */
2661
2662/*
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002663 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002664 */
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002665static const char * const policy_modes[] =
2666{
2667 [MPOL_DEFAULT] = "default",
2668 [MPOL_PREFERRED] = "prefer",
2669 [MPOL_BIND] = "bind",
2670 [MPOL_INTERLEAVE] = "interleave",
Lee Schermerhornd3a71032012-10-25 14:16:29 +02002671 [MPOL_LOCAL] = "local",
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002672};
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002673
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002674
2675#ifdef CONFIG_TMPFS
2676/**
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002677 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002678 * @str: string containing mempolicy to parse
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002679 * @mpol: pointer to struct mempolicy pointer, returned on success.
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002680 *
2681 * Format of input:
2682 * <mode>[=<flags>][:<nodelist>]
2683 *
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002684 * On success, returns 0, else 1
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002685 */
Hugh Dickinsa7a88b22013-01-02 02:04:23 -08002686int mpol_parse_str(char *str, struct mempolicy **mpol)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002687{
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002688 struct mempolicy *new = NULL;
Lee Schermerhornb4652e82010-05-24 14:32:03 -07002689 unsigned short mode;
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002690 unsigned short mode_flags;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002691 nodemask_t nodes;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002692 char *nodelist = strchr(str, ':');
2693 char *flags = strchr(str, '=');
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002694 int err = 1;
2695
2696 if (nodelist) {
2697 /* NUL-terminate mode or flags string */
2698 *nodelist++ = '\0';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002699 if (nodelist_parse(nodelist, nodes))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002700 goto out;
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002701 if (!nodes_subset(nodes, node_states[N_MEMORY]))
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002702 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002703 } else
2704 nodes_clear(nodes);
2705
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002706 if (flags)
2707 *flags++ = '\0'; /* terminate mode string */
2708
Peter Zijlstra479e2802012-10-25 14:16:28 +02002709 for (mode = 0; mode < MPOL_MAX; mode++) {
Lee Schermerhorn345ace92010-05-24 14:32:04 -07002710 if (!strcmp(str, policy_modes[mode])) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002711 break;
2712 }
2713 }
Mel Gormana7200942012-11-16 09:37:58 +00002714 if (mode >= MPOL_MAX)
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002715 goto out;
2716
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002717 switch (mode) {
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002718 case MPOL_PREFERRED:
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002719 /*
2720 * Insist on a nodelist of one node only
2721 */
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002722 if (nodelist) {
2723 char *rest = nodelist;
2724 while (isdigit(*rest))
2725 rest++;
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002726 if (*rest)
2727 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002728 }
2729 break;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002730 case MPOL_INTERLEAVE:
2731 /*
2732 * Default to online nodes with memory if no nodelist
2733 */
2734 if (!nodelist)
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08002735 nodes = node_states[N_MEMORY];
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002736 break;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002737 case MPOL_LOCAL:
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002738 /*
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002739 * Don't allow a nodelist; mpol_new() checks flags
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002740 */
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002741 if (nodelist)
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002742 goto out;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002743 mode = MPOL_PREFERRED;
Lee Schermerhorn3f226aa2008-04-28 02:13:24 -07002744 break;
Ravikiran G Thirumalai413b43d2010-03-23 13:35:28 -07002745 case MPOL_DEFAULT:
2746 /*
2747		 * Insist on an empty nodelist
2748 */
2749 if (!nodelist)
2750 err = 0;
2751 goto out;
KOSAKI Motohirod69b2e62010-03-23 13:35:30 -07002752 case MPOL_BIND:
2753 /*
2754 * Insist on a nodelist
2755 */
2756 if (!nodelist)
2757 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002758 }
2759
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002760 mode_flags = 0;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002761 if (flags) {
2762 /*
2763 * Currently, we only support two mutually exclusive
2764 * mode flags.
2765 */
2766 if (!strcmp(flags, "static"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002767 mode_flags |= MPOL_F_STATIC_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002768 else if (!strcmp(flags, "relative"))
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002769 mode_flags |= MPOL_F_RELATIVE_NODES;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002770 else
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002771 goto out;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002772 }
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002773
2774 new = mpol_new(mode, mode_flags, &nodes);
2775 if (IS_ERR(new))
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002776 goto out;
2777
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002778 /*
2779 * Save nodes for mpol_to_str() to show the tmpfs mount options
2780 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2781 */
2782 if (mode != MPOL_PREFERRED)
2783 new->v.nodes = nodes;
2784 else if (nodelist)
2785 new->v.preferred_node = first_node(nodes);
2786 else
2787 new->flags |= MPOL_F_LOCAL;
2788
2789 /*
2790 * Save nodes for contextualization: this will be used to "clone"
2791 * the mempolicy in a specific context [cpuset] at a later time.
2792 */
2793 new->w.user_nodemask = nodes;
2794
KOSAKI Motohiro926f2ae2010-03-23 13:35:32 -07002795 err = 0;
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002796
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002797out:
2798 /* Restore string for error message */
2799 if (nodelist)
2800 *--nodelist = ':';
2801 if (flags)
2802 *--flags = '=';
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002803 if (!err)
2804 *mpol = new;
Lee Schermerhorn095f1fc2008-04-28 02:13:23 -07002805 return err;
2806}
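/*
 * Illustrative inputs (a sketch of the format documented above): tmpfs
 * hands the value of its "mpol=" mount option to this function, so strings
 * such as "interleave:0-3", "bind=static:0,2", "prefer:1" or "local" are
 * accepted, provided the listed nodes actually have memory.  A hypothetical
 * caller owning a writable copy of the string would do:
 *
 *	struct mempolicy *mpol;
 *	char buf[] = "interleave=relative:0-3";
 *
 *	if (!mpol_parse_str(buf, &mpol))
 *		mpol_put(mpol);		// drop the reference when done
 */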
2807#endif /* CONFIG_TMPFS */
2808
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002809/**
2810 * mpol_to_str - format a mempolicy structure for printing
2811 * @buffer: to contain formatted mempolicy string
2812 * @maxlen: length of @buffer
2813 * @pol: pointer to mempolicy to be formatted
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002814 *
David Rientjes948927e2013-11-12 15:07:28 -08002815 * Convert @pol into a string. If @buffer is too short, truncate the string.
2816 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2817 * longest flag, "relative", and to display at least a few node ids.
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002818 */
David Rientjes948927e2013-11-12 15:07:28 -08002819void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002820{
2821 char *p = buffer;
David Rientjes948927e2013-11-12 15:07:28 -08002822 nodemask_t nodes = NODE_MASK_NONE;
2823 unsigned short mode = MPOL_DEFAULT;
2824 unsigned short flags = 0;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002825
David Rientjes8790c712014-01-30 15:46:08 -08002826 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002827 mode = pol->mode;
David Rientjes948927e2013-11-12 15:07:28 -08002828 flags = pol->flags;
2829 }
Lee Schermerhornbea904d2008-04-28 02:13:18 -07002830
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002831 switch (mode) {
2832 case MPOL_DEFAULT:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002833 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002834 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002835 if (flags & MPOL_F_LOCAL)
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002836 mode = MPOL_LOCAL;
Lee Schermerhorn53f25562008-04-28 02:13:20 -07002837 else
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002838 node_set(pol->v.preferred_node, nodes);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002839 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002840 case MPOL_BIND:
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002841 case MPOL_INTERLEAVE:
Hugh Dickinsf2a07f42013-01-02 02:01:33 -08002842 nodes = pol->v.nodes;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002843 break;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002844 default:
David Rientjes948927e2013-11-12 15:07:28 -08002845 WARN_ON_ONCE(1);
2846 snprintf(p, maxlen, "unknown");
2847 return;
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002848 }
2849
David Rientjesb7a9f422013-11-21 14:32:06 -08002850 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002851
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07002852 if (flags & MPOL_MODE_FLAGS) {
David Rientjes948927e2013-11-12 15:07:28 -08002853 p += snprintf(p, buffer + maxlen - p, "=");
David Rientjesf5b087b2008-04-28 02:12:27 -07002854
Lee Schermerhorn22919902008-04-28 02:13:22 -07002855 /*
2856 * Currently, the only defined flags are mutually exclusive
2857 */
David Rientjesf5b087b2008-04-28 02:12:27 -07002858 if (flags & MPOL_F_STATIC_NODES)
Lee Schermerhorn22919902008-04-28 02:13:22 -07002859 p += snprintf(p, buffer + maxlen - p, "static");
2860 else if (flags & MPOL_F_RELATIVE_NODES)
2861 p += snprintf(p, buffer + maxlen - p, "relative");
David Rientjesf5b087b2008-04-28 02:12:27 -07002862 }
2863
Tejun Heo9e763e02015-02-13 14:38:02 -08002864 if (!nodes_empty(nodes))
2865 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2866 nodemask_pr_args(&nodes));
Christoph Lameter1a75a6c2006-01-08 01:01:02 -08002867}
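/*
 * Illustrative output (sketch): with a 32-byte buffer as recommended above,
 * mpol_to_str() emits the same form mpol_parse_str() accepts, e.g.
 * "default", "local", "prefer:1", "bind=static:0,2" or "interleave:0-3".
 *
 *	char buf[32];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);	// pol: some struct mempolicy *
 */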