/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
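
/*
 * Illustrative only: a minimal userspace sketch of how the policies above
 * are typically requested through the libnuma syscall wrappers in
 * <numaif.h>.  It is not part of this file, assumes a machine with nodes
 * 0 and 1, and omits error handling.
 *
 *	#include <sys/mman.h>
 *	#include <numaif.h>
 *
 *	unsigned long interleave_mask = 0x3;	// nodes 0 and 1
 *	unsigned long bind_mask = 0x1;		// node 0 only
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	// process policy: interleave future allocations over nodes 0-1
 *	set_mempolicy(MPOL_INTERLEAVE, &interleave_mask,
 *		      8 * sizeof(interleave_mask));
 *	// VMA policy: restrict this mapping to node 0, no fallback
 *	mbind(buf, len, MPOL_BIND, &bind_mask, 8 * sizeof(bind_mask), 0);
 */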

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the new nodes, and the second step clears all
	 * the disallowed nodes. This way we avoid being left with no node
	 * to allocate pages from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 * MPOL_REBIND_ONCE  - do rebind work at once
	 * MPOL_REBIND_STEP1 - set all the new nodes
	 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
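
/*
 * Worked example of the two-step rebind described above (illustrative only,
 * node numbers are made up): an MPOL_INTERLEAVE policy has v.nodes = {0,1}
 * and its cpuset is moved to nodes {2,3}.  MPOL_REBIND_STEP1 adds the new
 * nodes, leaving v.nodes = {0,1,2,3}, so a racing lockless reader always
 * sees at least one allowed node.  MPOL_REBIND_STEP2 then drops the
 * disallowed ones, leaving v.nodes = {2,3}.  A caller that holds the
 * appropriate lock can use MPOL_REBIND_ONCE and go straight to {2,3}.
 */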

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
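
/*
 * Illustrative example of the remap above (made-up node numbers): with a
 * user-supplied relative mask *orig = {0,1} and an allowed set *rel =
 * {4,6,8}, nodes_fold() wraps *orig modulo nodes_weight(*rel) == 3, and
 * nodes_onto() then maps relative bit 0 to the first allowed node and
 * relative bit 1 to the second, giving *ret = {4,6}.
 */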

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 * MPOL_REBIND_ONCE  - do rebind work at once
 * MPOL_REBIND_STEP1 - set all the new nodes
 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == MPOL_REBIND_STEP1, we use
		 * ->w.cpuset_mems_allowed to cache the result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node_in(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the new nodes, and the second step clears all the
 * disallowed nodes. This way we avoid being left with no node to
 * allocate pages from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 * MPOL_REBIND_ONCE  - do rebind work at once
 * MPOL_REBIND_STEP1 - set all the new nodes
 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist passed
 * via @pagelist.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx,
				 vma_get_anon_name(vma));
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL. We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
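
/*
 * Illustrative only: the query side above is reached from userspace via
 * get_mempolicy(2) (wrapper in libnuma's <numaif.h>).  A minimal sketch of
 * the MPOL_F_NODE | MPOL_F_ADDR case, which reports the node backing a
 * given, already-faulted-in page (addr is assumed to point into a valid
 * mapping; error handling is minimal):
 *
 *	#include <stdio.h>
 *	#include <numaif.h>
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */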

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning 'tmp', we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	err = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);
	if (!err)
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_movable_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}
1258
Christoph Lameter39743882006-01-08 01:00:51 -08001259/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001260 * User space interface with variable sized bitmaps for nodelists.
1261 */
1262
1263/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001264static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001265 unsigned long maxnode)
1266{
1267 unsigned long k;
Yisheng Xie2851e3b2018-01-31 16:16:11 -08001268 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001269 unsigned long nlongs;
1270 unsigned long endmask;
1271
1272 --maxnode;
1273 nodes_clear(*nodes);
1274 if (maxnode == 0 || !nmask)
1275 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001276 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001277 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001278
1279 nlongs = BITS_TO_LONGS(maxnode);
1280 if ((maxnode % BITS_PER_LONG) == 0)
1281 endmask = ~0UL;
1282 else
1283 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1284
Yisheng Xie2851e3b2018-01-31 16:16:11 -08001285 /*
1286 * When the user specifies more nodes than supported, just check
1287 * whether the unsupported part is all zero.
1288 *
1289 * If maxnode covers more longs than MAX_NUMNODES, check the bits
1290 * in that extra area first, and then go on to check the remaining
1291 * bits at or above MAX_NUMNODES.
1292 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
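 *
 * For example (illustrative): a caller passing maxnode == 1025 on a
 * 64-bit kernel ends up with maxnode == 1024 after the decrement,
 * nlongs == 16 and endmask == ~0UL.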
1293 */
Christoph Lameter8bccd852005-10-29 18:16:59 -07001294 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1295 if (nlongs > PAGE_SIZE/sizeof(long))
1296 return -EINVAL;
1297 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001298 if (get_user(t, nmask + k))
1299 return -EFAULT;
1300 if (k == nlongs - 1) {
1301 if (t & endmask)
1302 return -EINVAL;
1303 } else if (t)
1304 return -EINVAL;
1305 }
1306 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1307 endmask = ~0UL;
1308 }
1309
Yisheng Xie2851e3b2018-01-31 16:16:11 -08001310 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1311 unsigned long valid_mask = endmask;
1312
1313 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1314 if (get_user(t, nmask + nlongs - 1))
1315 return -EFAULT;
1316 if (t & valid_mask)
1317 return -EINVAL;
1318 }
1319
Christoph Lameter8bccd852005-10-29 18:16:59 -07001320 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1321 return -EFAULT;
1322 nodes_addr(*nodes)[nlongs-1] &= endmask;
1323 return 0;
1324}
1325
1326/* Copy a kernel node mask to user space */
1327static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1328 nodemask_t *nodes)
1329{
1330 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1331 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1332
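	/*
	 * If the user buffer covers more bits than the kernel supports,
	 * zero the tail so the caller never sees stale data.
	 */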
1333 if (copy > nbytes) {
1334 if (copy > PAGE_SIZE)
1335 return -EINVAL;
1336 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1337 return -EFAULT;
1338 copy = nbytes;
1339 }
1340 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1341}
1342
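/*
 * mbind(2) entry point.  The optional mode flags (MPOL_F_STATIC_NODES,
 * MPOL_F_RELATIVE_NODES) arrive OR'ed into @mode and are split out
 * before validation.
 *
 * Illustrative user-space call (not part of this file), binding a
 * mapping to nodes 0 and 1:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	mbind(addr, length, MPOL_BIND, &nodemask, 3, MPOL_MF_STRICT);
 */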
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001343SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
Rasmus Villemoesf7f28ca2014-06-04 16:07:57 -07001344 unsigned long, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001345 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001346{
1347 nodemask_t nodes;
1348 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001349 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001350
David Rientjes028fec42008-04-28 02:12:25 -07001351 mode_flags = mode & MPOL_MODE_FLAGS;
1352 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001353 if (mode >= MPOL_MAX)
1354 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001355 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1356 (mode_flags & MPOL_F_RELATIVE_NODES))
1357 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001358 err = get_nodes(&nodes, nmask, maxnode);
1359 if (err)
1360 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001361 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001362}
1363
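/*
 * Illustrative user-space call (not part of this file), interleaving
 * the calling task's future page allocations over nodes 0 and 1:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &nodemask, 3);
 */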
1364/* Set the process memory policy */
Rasmus Villemoes23c89022014-06-04 16:07:58 -07001365SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001366 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001367{
1368 int err;
1369 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001370 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001371
David Rientjes028fec42008-04-28 02:12:25 -07001372 flags = mode & MPOL_MODE_FLAGS;
1373 mode &= ~MPOL_MODE_FLAGS;
1374 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001375 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001376 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1377 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001378 err = get_nodes(&nodes, nmask, maxnode);
1379 if (err)
1380 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001381 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001382}
1383
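/*
 * migrate_pages(2) entry point: move pages of task @pid from the nodes
 * in @old_nodes to the nodes in @new_nodes, subject to the permission
 * and cpuset checks below.
 *
 * Illustrative user-space call (not part of this file), moving a
 * task's pages from node 0 to node 1:
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	migrate_pages(pid, 3, &old, &new);
 */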
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001384SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1385 const unsigned long __user *, old_nodes,
1386 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001387{
David Howellsc69e8d92008-11-14 10:39:19 +11001388 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001389 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001390 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001391 nodemask_t task_nodes;
1392 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001393 nodemask_t *old;
1394 nodemask_t *new;
1395 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001396
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001397 if (!scratch)
1398 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001399
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001400 old = &scratch->mask1;
1401 new = &scratch->mask2;
1402
1403 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001404 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001405 goto out;
1406
1407 err = get_nodes(new, new_nodes, maxnode);
1408 if (err)
1409 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001410
1411 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001412 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001413 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001414 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001415 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001416 err = -ESRCH;
1417 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001418 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001419 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001420
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001421 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001422
1423 /*
1424 * Check if this process has the right to modify the specified
1425 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001426 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001427 * user ID as the target process.
1428 */
David Howellsc69e8d92008-11-14 10:39:19 +11001429 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001430 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1431 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001432 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001433 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001434 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001435 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001436 }
David Howellsc69e8d92008-11-14 10:39:19 +11001437 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001438
1439 task_nodes = cpuset_mems_allowed(task);
1440 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001441 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001442 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001443 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001444 }
1445
Yisheng Xie4cf24632018-01-31 16:16:15 -08001446 task_nodes = cpuset_mems_allowed(current);
1447 nodes_and(*new, *new, task_nodes);
1448 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001449 goto out_put;
Yisheng Xie4cf24632018-01-31 16:16:15 -08001450
1451 nodes_and(*new, *new, node_states[N_MEMORY]);
1452 if (nodes_empty(*new))
1453 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001454
David Quigley86c3a762006-06-23 02:04:02 -07001455 err = security_task_movememory(task);
1456 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001457 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001458
Christoph Lameter3268c632012-03-21 16:34:06 -07001459 mm = get_task_mm(task);
1460 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001461
1462 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001463 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001464 goto out;
1465 }
1466
1467 err = do_migrate_pages(mm, old, new,
1468 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001469
1470 mmput(mm);
1471out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001472 NODEMASK_SCRATCH_FREE(scratch);
1473
Christoph Lameter39743882006-01-08 01:00:51 -08001474 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001475
1476out_put:
1477 put_task_struct(task);
1478 goto out;
1479
Christoph Lameter39743882006-01-08 01:00:51 -08001480}
1481
1482
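/*
 * Illustrative user-space call (not part of this file), querying the
 * policy in effect at @addr.  Note that @maxnode must cover at least
 * the kernel's MAX_NUMNODES bits when a nodemask is requested; a
 * 1024-bit buffer is assumed to be large enough here:
 *
 *	int mode;
 *	unsigned long nodes[16] = { 0 };
 *	get_mempolicy(&mode, nodes, 1024, addr, MPOL_F_ADDR);
 */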
Christoph Lameter8bccd852005-10-29 18:16:59 -07001483/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001484SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1485 unsigned long __user *, nmask, unsigned long, maxnode,
1486 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001487{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001488 int err;
1489 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001490 nodemask_t nodes;
1491
1492 if (nmask != NULL && maxnode < MAX_NUMNODES)
1493 return -EINVAL;
1494
1495 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1496
1497 if (err)
1498 return err;
1499
1500 if (policy && put_user(pval, policy))
1501 return -EFAULT;
1502
1503 if (nmask)
1504 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1505
1506 return err;
1507}
1508
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509#ifdef CONFIG_COMPAT
1510
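/*
 * 32-bit compat entry points.  Each one converts the compat-width
 * bitmap from user space into a native-width bitmap in a temporary
 * user-space buffer (compat_alloc_user_space()) and forwards to the
 * native syscall.
 */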
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001511COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1512 compat_ulong_t __user *, nmask,
1513 compat_ulong_t, maxnode,
1514 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515{
1516 long err;
1517 unsigned long __user *nm = NULL;
1518 unsigned long nr_bits, alloc_size;
1519 DECLARE_BITMAP(bm, MAX_NUMNODES);
1520
1521 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1522 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1523
1524 if (nmask)
1525 nm = compat_alloc_user_space(alloc_size);
1526
1527 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1528
1529 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001530 unsigned long copy_size;
1531 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1532 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 /* ensure entire bitmap is zeroed */
1534 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1535 err |= compat_put_bitmap(nmask, bm, nr_bits);
1536 }
1537
1538 return err;
1539}
1540
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001541COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1542 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 unsigned long __user *nm = NULL;
1545 unsigned long nr_bits, alloc_size;
1546 DECLARE_BITMAP(bm, MAX_NUMNODES);
1547
1548 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1549 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1550
1551 if (nmask) {
Chris Sallscddab762017-04-07 23:48:11 -07001552 if (compat_get_bitmap(bm, nmask, nr_bits))
1553 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 nm = compat_alloc_user_space(alloc_size);
Chris Sallscddab762017-04-07 23:48:11 -07001555 if (copy_to_user(nm, bm, alloc_size))
1556 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 }
1558
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 return sys_set_mempolicy(mode, nm, nr_bits+1);
1560}
1561
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001562COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1563 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1564 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 unsigned long __user *nm = NULL;
1567 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001568 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
1570 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1571 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1572
1573 if (nmask) {
Chris Sallscddab762017-04-07 23:48:11 -07001574 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1575 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 nm = compat_alloc_user_space(alloc_size);
Chris Sallscddab762017-04-07 23:48:11 -07001577 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1578 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 }
1580
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1582}
1583
1584#endif
1585
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001586struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1587 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001589 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
1591 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001592 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001593 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001594 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001596
1597 /*
1598 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1599 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1600 * count on these policies which will be dropped by
1601 * mpol_cond_put() later
1602 */
1603 if (mpol_needs_cond_ref(pol))
1604 mpol_get(pol);
1605 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001606 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001607
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001608 return pol;
1609}
1610
1611/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001612 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001613 * @vma: virtual memory area whose policy is sought
1614 * @addr: address in @vma for shared policy lookup
1615 *
1616 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001617 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001618 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1619 * count--added by the get_policy() vm_op, as appropriate--to protect against
1620 * freeing by another task. It is the caller's responsibility to free the
1621 * extra reference for shared policies.
1622 */
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001623static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1624 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001625{
1626 struct mempolicy *pol = __get_vma_policy(vma, addr);
1627
Oleg Nesterov8d902742014-10-09 15:27:45 -07001628 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001629 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 return pol;
1632}
1633
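/*
 * Return true if the VMA's effective policy has MPOL_F_MOF set, i.e.
 * pages mapped by this VMA are candidates for migrate-on-fault (used
 * by automatic NUMA balancing).
 */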
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001634bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001635{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001636 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001637
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001638 if (vma->vm_ops && vma->vm_ops->get_policy) {
1639 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001640
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001641 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1642 if (pol && (pol->flags & MPOL_F_MOF))
1643 ret = true;
1644 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001645
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001646 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001647 }
1648
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001649 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001650 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001651 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001652
Mel Gormanfc3147242013-10-07 11:29:09 +01001653 return pol->flags & MPOL_F_MOF;
1654}
1655
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001656static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1657{
1658 enum zone_type dynamic_policy_zone = policy_zone;
1659
1660 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1661
1662 /*
1663 * If policy->v.nodes has movable memory only, we apply the policy
1664 * only when gfp_zone(gfp) == ZONE_MOVABLE.
1665 *
1666 * policy->v.nodes was intersected with node_states[N_MEMORY],
1667 * so if the following test fails, it implies
1668 * policy->v.nodes has movable memory only.
1669 */
1670 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1671 dynamic_policy_zone = ZONE_MOVABLE;
1672
1673 return zone >= dynamic_policy_zone;
1674}
1675
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001676/*
1677 * Return a nodemask representing a mempolicy for filtering nodes for
1678 * page allocation
1679 */
1680static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001681{
1682 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001683 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001684 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001685 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1686 return &policy->v.nodes;
1687
1688 return NULL;
1689}
1690
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001691/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001692static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1693 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001695 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001697 if (!(policy->flags & MPOL_F_LOCAL))
1698 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 break;
1700 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001701 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001702 * Normally, MPOL_BIND allocations are node-local within the
1703 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001704 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001705 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001706 */
Mel Gorman19770b32008-04-28 02:12:18 -07001707 if (unlikely(gfp & __GFP_THISNODE) &&
1708 unlikely(!node_isset(nd, policy->v.nodes)))
1709 nd = first_node(policy->v.nodes);
1710 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 BUG();
1713 }
Mel Gorman0e884602008-04-28 02:12:14 -07001714 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715}
1716
1717/* Do dynamic interleaving for a process */
1718static unsigned interleave_nodes(struct mempolicy *policy)
1719{
1720 unsigned nid, next;
1721 struct task_struct *me = current;
1722
1723 nid = me->il_next;
Andrew Morton0edaf862016-05-19 17:10:58 -07001724 next = next_node_in(nid, policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001725 if (next < MAX_NUMNODES)
1726 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 return nid;
1728}
1729
Christoph Lameterdc85da12006-01-18 17:42:36 -08001730/*
1731 * Depending on the memory policy provide a node from which to allocate the
1732 * next slab entry.
1733 */
David Rientjes2a389612014-04-07 15:37:29 -07001734unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001735{
Andi Kleene7b691b2012-06-09 02:40:03 -07001736 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001737 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001738
1739 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001740 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001741
1742 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001743 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001744 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001745
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001746 switch (policy->mode) {
1747 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001748 /*
1749 * handled MPOL_F_LOCAL above
1750 */
1751 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001752
Christoph Lameterdc85da12006-01-18 17:42:36 -08001753 case MPOL_INTERLEAVE:
1754 return interleave_nodes(policy);
1755
Mel Gormandd1a2392008-04-28 02:12:17 -07001756 case MPOL_BIND: {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001757 struct zoneref *z;
1758
Christoph Lameterdc85da12006-01-18 17:42:36 -08001759 /*
1760 * Follow bind policy behavior and start allocation at the
1761 * first node.
1762 */
Mel Gorman19770b32008-04-28 02:12:18 -07001763 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001764 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001765 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001766 z = first_zones_zonelist(zonelist, highest_zoneidx,
1767 &policy->v.nodes);
1768 return z->zone ? z->zone->node : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001769 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001770
Christoph Lameterdc85da12006-01-18 17:42:36 -08001771 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001772 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001773 }
1774}
1775
Andrew Mortonfee83b32016-05-19 17:11:43 -07001776/*
1777 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1778 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1779 * number of present nodes.
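 *
 * For example (illustrative), with pol->v.nodes = {0,2,5} and n = 4:
 * nnodes = 3, target = 4 % 3 = 1, so the walk stops at the second
 * node in the mask and node 2 is returned.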
1780 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781static unsigned offset_il_node(struct mempolicy *pol,
Andrew Mortonfee83b32016-05-19 17:11:43 -07001782 struct vm_area_struct *vma, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001784 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001785 unsigned target;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001786 int i;
1787 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
David Rientjesf5b087b2008-04-28 02:12:27 -07001789 if (!nnodes)
1790 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001791 target = (unsigned int)n % nnodes;
1792 nid = first_node(pol->v.nodes);
1793 for (i = 0; i < target; i++)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001794 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 return nid;
1796}
1797
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001798/* Determine a node number for interleave */
1799static inline unsigned interleave_nid(struct mempolicy *pol,
1800 struct vm_area_struct *vma, unsigned long addr, int shift)
1801{
1802 if (vma) {
1803 unsigned long off;
1804
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001805 /*
1806 * For small pages, there is no difference between
1807 * shift and PAGE_SHIFT, so the bit-shift is safe.
1808 * For huge pages, since vm_pgoff is in units of small
1809 * pages, we need to shift off the always-zero bits to get
1810 * a useful offset.
1811 */
1812 BUG_ON(shift < PAGE_SHIFT);
1813 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001814 off += (addr - vma->vm_start) >> shift;
1815 return offset_il_node(pol, vma, off);
1816 } else
1817 return interleave_nodes(pol);
1818}
1819
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001820#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001821/*
1822 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001823 * @vma: virtual memory area whose policy is sought
1824 * @addr: address in @vma for shared policy lookup and interleave policy
1825 * @gfp_flags: for requested zone
1826 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1827 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001828 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001829 * Returns a zonelist suitable for a huge page allocation and a pointer
1830 * to the struct mempolicy for conditional unref after allocation.
1831 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1832 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001833 *
Mel Gormand26914d2014-04-03 14:47:24 -07001834 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001835 */
Mel Gorman396faf02007-07-17 04:03:13 -07001836struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001837 gfp_t gfp_flags, struct mempolicy **mpol,
1838 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001839{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001840 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001841
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001842 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001843 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001844
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001845 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1846 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001847 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001848 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001849 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001850 if ((*mpol)->mode == MPOL_BIND)
1851 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001852 }
1853 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001854}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001855
1856/*
1857 * init_nodemask_of_mempolicy
1858 *
1859 * If the current task's mempolicy is "default" [NULL], return 'false'
1860 * to indicate default policy. Otherwise, extract the policy nodemask
1861 * for 'bind' or 'interleave' policy into the argument nodemask, or
1862 * initialize the argument nodemask to contain the single node for
1863 * 'preferred' or 'local' policy and return 'true' to indicate presence
1864 * of non-default mempolicy.
1865 *
1866 * We don't bother with reference counting the mempolicy [mpol_get/put]
1867 * because the current task is examining its own mempolicy and a task's
1868 * mempolicy is only ever changed by the task itself.
1869 *
1870 * N.B., it is the caller's responsibility to free a returned nodemask.
1871 */
1872bool init_nodemask_of_mempolicy(nodemask_t *mask)
1873{
1874 struct mempolicy *mempolicy;
1875 int nid;
1876
1877 if (!(mask && current->mempolicy))
1878 return false;
1879
Miao Xiec0ff7452010-05-24 14:32:08 -07001880 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001881 mempolicy = current->mempolicy;
1882 switch (mempolicy->mode) {
1883 case MPOL_PREFERRED:
1884 if (mempolicy->flags & MPOL_F_LOCAL)
1885 nid = numa_node_id();
1886 else
1887 nid = mempolicy->v.preferred_node;
1888 init_nodemask_of_node(mask, nid);
1889 break;
1890
1891 case MPOL_BIND:
1892 /* Fall through */
1893 case MPOL_INTERLEAVE:
1894 *mask = mempolicy->v.nodes;
1895 break;
1896
1897 default:
1898 BUG();
1899 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001900 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001901
1902 return true;
1903}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001904#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001905
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001906/*
1907 * mempolicy_nodemask_intersects
1908 *
1909 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1910 * policy. Otherwise, check for intersection between mask and the policy
1911 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1912 * policy, always return true since it may allocate elsewhere on fallback.
1913 *
1914 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1915 */
1916bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1917 const nodemask_t *mask)
1918{
1919 struct mempolicy *mempolicy;
1920 bool ret = true;
1921
1922 if (!mask)
1923 return ret;
1924 task_lock(tsk);
1925 mempolicy = tsk->mempolicy;
1926 if (!mempolicy)
1927 goto out;
1928
1929 switch (mempolicy->mode) {
1930 case MPOL_PREFERRED:
1931 /*
1932 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1933 * allocate from, they may fallback to other nodes when oom.
1934 * Thus, it's possible for tsk to have allocated memory from
1935 * nodes in mask.
1936 */
1937 break;
1938 case MPOL_BIND:
1939 case MPOL_INTERLEAVE:
1940 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1941 break;
1942 default:
1943 BUG();
1944 }
1945out:
1946 task_unlock(tsk);
1947 return ret;
1948}
1949
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950/* Allocate a page in interleaved policy.
1951 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001952static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1953 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954{
1955 struct zonelist *zl;
1956 struct page *page;
1957
Mel Gorman0e884602008-04-28 02:12:14 -07001958 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001960 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001961 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 return page;
1963}
1964
1965/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001966 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 *
1968 * @gfp:
1969 * %GFP_USER user allocation.
1970 * %GFP_KERNEL kernel allocations,
1971 * %GFP_HIGHMEM highmem/user allocations,
1972 * %GFP_FS allocation should not call back into a file system.
1973 * %GFP_ATOMIC don't sleep.
1974 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001975 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 * @vma: Pointer to VMA or NULL if not available.
1977 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001978 * @node: Which node to prefer for allocation (modulo policy).
1979 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 *
1981 * This function allocates a page from the kernel page pool and applies
1982 * a NUMA policy associated with the VMA or the current process.
1983 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1984 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001985 * all allocations for pages that will be mapped into user space. Returns
1986 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 */
1988struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001989alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001990 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991{
Mel Gormancc9a6c82012-03-21 16:34:11 -07001992 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07001993 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07001994 unsigned int cpuset_mems_cookie;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001995 struct zonelist *zl;
1996 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Mel Gormancc9a6c82012-03-21 16:34:11 -07001998retry_cpuset:
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001999 pol = get_vma_policy(vma, addr);
Mel Gormand26914d2014-04-03 14:47:24 -07002000 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002001
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002002 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002004
Andi Kleen8eac5632011-02-25 14:44:28 -08002005 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002006 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002007 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002008 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002010
Vlastimil Babka0867a572015-06-24 16:58:48 -07002011 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2012 int hpage_node = node;
2013
2014 /*
2015 * For hugepage allocation and non-interleave policy which
2016 * allows the current node (or other explicitly preferred
2017 * node) we only try to allocate from the current/preferred
2018 * node and don't fall back to other nodes, as the cost of
2019 * remote accesses would likely offset THP benefits.
2020 *
2021 * If the policy is interleave, or does not allow the current
2022 * node in its nodemask, we allocate the standard way.
2023 */
2024 if (pol->mode == MPOL_PREFERRED &&
2025 !(pol->flags & MPOL_F_LOCAL))
2026 hpage_node = pol->v.preferred_node;
2027
2028 nmask = policy_nodemask(gfp, pol);
2029 if (!nmask || node_isset(hpage_node, *nmask)) {
2030 mpol_cond_put(pol);
Vlastimil Babka96db8002015-09-08 15:03:50 -07002031 page = __alloc_pages_node(hpage_node,
Vlastimil Babka0867a572015-06-24 16:58:48 -07002032 gfp | __GFP_THISNODE, order);
2033 goto out;
2034 }
2035 }
2036
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002037 nmask = policy_nodemask(gfp, pol);
2038 zl = policy_zonelist(gfp, pol, node);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002039 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
Vlastimil Babka9b1a1ae2017-01-24 15:18:18 -08002040 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002041out:
Mel Gormand26914d2014-04-03 14:47:24 -07002042 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002043 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002044 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045}
2046
2047/**
2048 * alloc_pages_current - Allocate pages.
2049 *
2050 * @gfp:
2051 * %GFP_USER user allocation,
2052 * %GFP_KERNEL kernel allocation,
2053 * %GFP_HIGHMEM highmem allocation,
2054 * %GFP_FS don't call back into a file system.
2055 * %GFP_ATOMIC don't sleep.
2056 * @order: Power of two of allocation size in pages. 0 is a single page.
2057 *
2058 * Allocate a page from the kernel page pool. When not in
2059 * interrupt context, apply the current process' NUMA policy.
2060 * Returns NULL when no page can be allocated.
2061 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002062 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 * 1) it's ok to take cpuset_sem (can WAIT), and
2064 * 2) allocating for current task (not interrupt).
2065 */
Al Virodd0fc662005-10-07 07:46:04 +01002066struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002068 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002069 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002070 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
Oleg Nesterov8d902742014-10-09 15:27:45 -07002072 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2073 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002074
Mel Gormancc9a6c82012-03-21 16:34:11 -07002075retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07002076 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002077
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002078 /*
2079 * No reference counting needed for current->mempolicy
2080 * nor system default_policy
2081 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002082 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002083 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2084 else
2085 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002086 policy_zonelist(gfp, pol, numa_node_id()),
2087 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002088
Mel Gormand26914d2014-04-03 14:47:24 -07002089 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002090 goto retry_cpuset;
2091
Miao Xiec0ff7452010-05-24 14:32:08 -07002092 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093}
2094EXPORT_SYMBOL(alloc_pages_current);
2095
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002096int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2097{
2098 struct mempolicy *pol = mpol_dup(vma_policy(src));
2099
2100 if (IS_ERR(pol))
2101 return PTR_ERR(pol);
2102 dst->vm_policy = pol;
2103 return 0;
2104}
2105
Paul Jackson42253992006-01-08 01:01:59 -08002106/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002107 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002108 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2109 * with the mems_allowed returned by cpuset_mems_allowed(). This
2110 * keeps mempolicies cpuset relative after its cpuset moves. See
2111 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002112 *
2113 * current's mempolicy may be rebound by another task (the task that changes
2114 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002115 */
Paul Jackson42253992006-01-08 01:01:59 -08002116
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002117/* Slow path of a mempolicy duplicate */
2118struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119{
2120 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2121
2122 if (!new)
2123 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002124
2125 /* task's mempolicy is protected by alloc_lock */
2126 if (old == current->mempolicy) {
2127 task_lock(current);
2128 *new = *old;
2129 task_unlock(current);
2130 } else
2131 *new = *old;
2132
Paul Jackson42253992006-01-08 01:01:59 -08002133 if (current_cpuset_is_being_rebound()) {
2134 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002135 if (new->flags & MPOL_F_REBINDING)
2136 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2137 else
2138 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 return new;
2142}
2143
2144/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002145bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146{
2147 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002148 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002149 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002150 return false;
Bob Liu19800502010-05-24 14:32:01 -07002151 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002152 return false;
Bob Liu19800502010-05-24 14:32:01 -07002153 if (mpol_store_user_nodemask(a))
2154 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002155 return false;
Bob Liu19800502010-05-24 14:32:01 -07002156
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002157 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002158 case MPOL_BIND:
2159 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002161 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 case MPOL_PREFERRED:
Yisheng Xiebe1a9d12018-03-22 16:17:02 -07002163 /* a's ->flags is the same as b's */
2164 if (a->flags & MPOL_F_LOCAL)
2165 return true;
Namhyung Kim75719662011-03-22 16:33:02 -07002166 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 default:
2168 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002169 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 }
2171}
2172
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 * Shared memory backing store policy support.
2175 *
2176 * Remember policies even when nobody has shared memory mapped.
2177 * The policies are kept in Red-Black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002178 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 * for any accesses to the tree.
2180 */
2181
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002182/*
2183 * lookup first element intersecting start-end. Caller holds sp->lock for
2184 * reading or for writing
2185 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186static struct sp_node *
2187sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2188{
2189 struct rb_node *n = sp->root.rb_node;
2190
2191 while (n) {
2192 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2193
2194 if (start >= p->end)
2195 n = n->rb_right;
2196 else if (end <= p->start)
2197 n = n->rb_left;
2198 else
2199 break;
2200 }
2201 if (!n)
2202 return NULL;
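	/*
	 * Walk back to the first (lowest-starting) node that still
	 * intersects [start, end).
	 */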
2203 for (;;) {
2204 struct sp_node *w = NULL;
2205 struct rb_node *prev = rb_prev(n);
2206 if (!prev)
2207 break;
2208 w = rb_entry(prev, struct sp_node, nd);
2209 if (w->end <= start)
2210 break;
2211 n = prev;
2212 }
2213 return rb_entry(n, struct sp_node, nd);
2214}
2215
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002216/*
2217 * Insert a new shared policy into the list. Caller holds sp->lock for
2218 * writing.
2219 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2221{
2222 struct rb_node **p = &sp->root.rb_node;
2223 struct rb_node *parent = NULL;
2224 struct sp_node *nd;
2225
2226 while (*p) {
2227 parent = *p;
2228 nd = rb_entry(parent, struct sp_node, nd);
2229 if (new->start < nd->start)
2230 p = &(*p)->rb_left;
2231 else if (new->end > nd->end)
2232 p = &(*p)->rb_right;
2233 else
2234 BUG();
2235 }
2236 rb_link_node(&new->nd, parent, p);
2237 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002238 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002239 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240}
2241
2242/* Find shared policy intersecting idx */
2243struct mempolicy *
2244mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2245{
2246 struct mempolicy *pol = NULL;
2247 struct sp_node *sn;
2248
2249 if (!sp->root.rb_node)
2250 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002251 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 sn = sp_lookup(sp, idx, idx+1);
2253 if (sn) {
2254 mpol_get(sn->policy);
2255 pol = sn->policy;
2256 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002257 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 return pol;
2259}
2260
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002261static void sp_free(struct sp_node *n)
2262{
2263 mpol_put(n->policy);
2264 kmem_cache_free(sn_cache, n);
2265}
2266
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002267/**
2268 * mpol_misplaced - check whether current page node is valid in policy
2269 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002270 * @page: page to be checked
2271 * @vma: vm area where page mapped
2272 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002273 *
2274 * Lookup current policy node id for vma,addr and "compare to" page's
2275 * node id.
2276 *
2277 * Returns:
2278 * -1 - not misplaced, page is in the right node
2279 * node - node id where the page should be
2280 *
2281 * Policy determination "mimics" alloc_page_vma().
2282 * Called from fault path where we know the vma and faulting address.
2283 */
2284int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2285{
2286 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002287 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002288 int curnid = page_to_nid(page);
2289 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002290 int thiscpu = raw_smp_processor_id();
2291 int thisnid = cpu_to_node(thiscpu);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002292 int polnid = -1;
2293 int ret = -1;
2294
2295 BUG_ON(!vma);
2296
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002297 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002298 if (!(pol->flags & MPOL_F_MOF))
2299 goto out;
2300
2301 switch (pol->mode) {
2302 case MPOL_INTERLEAVE:
2303 BUG_ON(addr >= vma->vm_end);
2304 BUG_ON(addr < vma->vm_start);
2305
2306 pgoff = vma->vm_pgoff;
2307 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2308 polnid = offset_il_node(pol, vma, pgoff);
2309 break;
2310
2311 case MPOL_PREFERRED:
2312 if (pol->flags & MPOL_F_LOCAL)
2313 polnid = numa_node_id();
2314 else
2315 polnid = pol->v.preferred_node;
2316 break;
2317
2318 case MPOL_BIND:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002319
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002320 /*
2321 * MPOL_BIND allows binding to multiple nodes.
2322 * Use the current page's node if it is in the policy nodemask,
2323 * else select the nearest allowed node, if any.
2324 * If there are no allowed nodes, use the current node [!misplaced].
2325 */
2326 if (node_isset(curnid, pol->v.nodes))
2327 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002328 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002329 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2330 gfp_zone(GFP_HIGHUSER),
Mel Gormanc33d6c02016-05-19 17:14:10 -07002331 &pol->v.nodes);
2332 polnid = z->zone->node;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002333 break;
2334
2335 default:
2336 BUG();
2337 }
Mel Gorman5606e382012-11-02 18:19:13 +00002338
2339 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002340 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002341 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002342
Rik van Riel10f39042014-01-27 17:03:44 -05002343 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002344 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002345 }
2346
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002347 if (curnid != polnid)
2348 ret = polnid;
2349out:
2350 mpol_cond_put(pol);
2351
2352 return ret;
2353}
2354
David Rientjesc11600e2016-09-01 16:15:07 -07002355/*
2356 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2357 * dropped after task->mempolicy is set to NULL so that any allocation done as
2358 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2359 * policy.
2360 */
2361void mpol_put_task_policy(struct task_struct *task)
2362{
2363 struct mempolicy *pol;
2364
2365 task_lock(task);
2366 pol = task->mempolicy;
2367 task->mempolicy = NULL;
2368 task_unlock(task);
2369 mpol_put(pol);
2370}
2371
Linus Torvalds1da177e2005-04-16 15:20:36 -07002372static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2373{
Paul Mundt140d5a42007-07-15 23:38:16 -07002374 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002376 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377}
2378
Mel Gorman42288fe2012-12-21 23:10:25 +00002379static void sp_node_init(struct sp_node *node, unsigned long start,
2380 unsigned long end, struct mempolicy *pol)
2381{
2382 node->start = start;
2383 node->end = end;
2384 node->policy = pol;
2385}
2386
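/*
 * Allocate a shared-policy node covering [start, end) that holds its
 * own MPOL_F_SHARED-marked copy of @pol.
 */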
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002387static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2388 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389{
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002390 struct sp_node *n;
2391 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002393 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394 if (!n)
2395 return NULL;
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002396
2397 newpol = mpol_dup(pol);
2398 if (IS_ERR(newpol)) {
2399 kmem_cache_free(sn_cache, n);
2400 return NULL;
2401 }
2402 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002403 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002404
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 return n;
2406}
2407
2408/* Replace a policy range. */
2409static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2410 unsigned long end, struct sp_node *new)
2411{
Mel Gormanb22d1272012-10-08 16:29:17 -07002412 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002413 struct sp_node *n_new = NULL;
2414 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002415 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
Mel Gorman42288fe2012-12-21 23:10:25 +00002417restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002418 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 n = sp_lookup(sp, start, end);
2420 /* Take care of old policies in the same range. */
2421 while (n && n->start < end) {
2422 struct rb_node *next = rb_next(&n->nd);
2423 if (n->start >= start) {
2424 if (n->end <= end)
2425 sp_delete(sp, n);
2426 else
2427 n->start = end;
2428 } else {
2429 /* Old policy spanning whole new range. */
2430 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002431 if (!n_new)
2432 goto alloc_new;
2433
2434 *mpol_new = *n->policy;
2435 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002436 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002438 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002439 n_new = NULL;
2440 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441 break;
2442 } else
2443 n->end = start;
2444 }
2445 if (!next)
2446 break;
2447 n = rb_entry(next, struct sp_node, nd);
2448 }
2449 if (new)
2450 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002451 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002452 ret = 0;
2453
2454err_out:
2455 if (mpol_new)
2456 mpol_put(mpol_new);
2457 if (n_new)
2458 kmem_cache_free(sn_cache, n_new);
2459
Mel Gormanb22d1272012-10-08 16:29:17 -07002460 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002461
2462alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002463 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002464 ret = -ENOMEM;
2465 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2466 if (!n_new)
2467 goto err_out;
2468 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2469 if (!mpol_new)
2470 goto err_out;
2471 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472}
2473
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002474/**
2475 * mpol_shared_policy_init - initialize shared policy for inode
2476 * @sp: pointer to inode shared policy
2477 * @mpol: struct mempolicy to install
2478 *
2479 * Install non-NULL @mpol in inode's shared policy rb-tree.
2480 * On entry, the current task has a reference on a non-NULL @mpol.
2481 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002482 * This is called during get_inode() calls, so we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

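/*
 * Install @npol over the page-offset range covered by @vma.  This is
 * typically reached through the vm_ops->set_policy hook (for example
 * shmem_set_policy()) when mbind() is applied to a shared mapping; the
 * caller named here is illustrative rather than exhaustive.
 */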
int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

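/*
 * Early-parameter hook: booting with "numa_balancing=enable" or
 * "numa_balancing=disable" on the kernel command line overrides the Kconfig
 * default applied in check_numabalancing_enable(); at runtime the
 * kernel.numa_balancing sysctl serves the same purpose.
 */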
static int __init setup_numabalancing(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

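	/*
	 * Per-node fallback policies used by automatic NUMA balancing:
	 * MPOL_F_MOF requests migrate-on-fault and MPOL_F_MORON marks the
	 * policy as one that pulls pages toward the referencing node
	 * (a summary of the flag semantics; see the definitions in
	 * mempolicy.h).
	 */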
	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
};


#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
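 * For example, the tmpfs mount option "mpol=interleave:0-3" reaches this
 * function as the string "interleave:0-3", and "bind=static:0,2" selects
 * MPOL_BIND with MPOL_F_STATIC_NODES over nodes 0 and 2.  (Illustrative
 * strings only; any mode/flag/nodelist combination the parser accepts works.)
 *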
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode < MPOL_MAX; mode++) {
		if (!strcmp(str, policy_modes[mode])) {
			break;
		}
	}
	if (mode >= MPOL_MAX)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert @pol into a string. If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
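 *
 * For example, an interleave policy over nodes 0-3 with MPOL_F_RELATIVE_NODES
 * set is rendered as "interleave=relative:0-3".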
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

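	/*
	 * NULL policies, the static default policy, and the MPOL_F_MORON
	 * policies installed for automatic NUMA balancing are all reported
	 * as "default".
	 */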
	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}