/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
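
/*
 * Illustration (a minimal userspace sketch, not part of this file's code):
 * the modes above are requested through set_mempolicy(2) for the process
 * policy and mbind(2) for a VMA policy. Assuming the libnuma <numaif.h>
 * header, an existing mapping described by addr/len, and a 64-bit
 * nodemask covering nodes 0 and 1, with error handling elided:
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = 0x3;			- nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 64);	- process policy
 *	mbind(addr, len, MPOL_BIND, &mask, 64, 0);	- VMA policy
 */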

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step is setting all the newly allowed nodes, and the
	 * second step is cleaning all the disallowed nodes. In this way,
	 * we can avoid being left with no node to allocate pages from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
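
/*
 * A worked example of the two-step rebind (a sketch for illustration,
 * not executed anywhere): suppose an MPOL_BIND policy currently uses
 * nodes {0,1} and the cpuset is rebound to {2,3}.
 *
 *	MPOL_REBIND_STEP1: pol->v.nodes = {0,1} | {2,3} = {0,1,2,3}
 *		(lockless readers still find at least one usable node)
 *	MPOL_REBIND_STEP2: pol->v.nodes = {2,3}
 *		(the now-disallowed nodes 0 and 1 are dropped)
 *
 * A caller that can rely on locking gets both steps at once via
 * MPOL_REBIND_ONCE, i.e. pol->v.nodes goes straight to {2,3}.
 */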

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
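
/*
 * Example of the fold/onto transform above (illustrative only): with
 * *orig = {0,2} and *rel = {4,5}, nodes_weight(*rel) is 2, so
 * nodes_fold() wraps each bit of orig modulo 2, giving tmp = {0}, and
 * nodes_onto() maps bit 0 of tmp onto the 0th set bit of rel, giving
 * *ret = {4}. Relative nodemasks are thus positions within the allowed
 * set, not absolute node numbers.
 */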

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some basic checking and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == MPOL_REBIND_STEP1, we use
		 * ->w.cpuset_mems_allowed to cache the result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node_in(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
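
/*
 * How the three remap flavours above differ (an illustrative trace):
 * with user_nodemask = {0,1} and a cpuset rebind to *nodes = {2,3},
 *
 *	MPOL_F_STATIC_NODES:	tmp = {0,1} & {2,3} = {}, which then
 *				falls back to tmp = *nodes = {2,3}; the
 *				user's absolute nodes are honoured only
 *				while they remain allowed.
 *	MPOL_F_RELATIVE_NODES:	fold/onto maps positions 0 and 1 onto
 *				the new allowed set, so tmp = {2,3}.
 *	default (no flag):	nodes_remap() translates old allowed
 *				bits to new ones, i.e. {0,1} -> {2,3}.
 */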

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step is setting all the newly allowed nodes, and the second step is
 * cleaning all the disallowed nodes. In this way, we can avoid being
 * left with no node to allocate pages from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates the task's mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through the pages, checking whether each page satisfies the
 * given conditions, and move it to the pagelist if it does.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * The markings are later cleared by NUMA hinting faults and, depending
 * on those faults, pages may be migrated for better NUMA placement.
 *
 * This assumes that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the list passed
 * via @pagelist.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new;	/* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
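
/*
 * For instance (illustrative only): applying a policy to the middle
 * pages of a single VMA first tries vma_merge() with the new policy;
 * failing that, split_vma() runs twice above, once at vmstart and once
 * at vmend, leaving three VMAs of which only the middle one gets
 * vma_replace_policy() applied.
 */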

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
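
	/*
	 * For example (an illustrative trace, not executed here): with
	 * from = {0,1} and to = {2,3}, the scan first finds s = 0, d = 2;
	 * node 2 is not set in tmp, an "empty slot", so <0,2> migrates
	 * first and bit 0 is cleared from tmp.  The next pass picks
	 * <1,3>, after which tmp is empty and the loop below terminates.
	 */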

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
1144
1145static void migrate_page_add(struct page *page, struct list_head *pagelist,
1146 unsigned long flags)
1147{
1148}
1149
Andrew Morton0ce72d42012-05-29 15:06:24 -07001150int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1151 const nodemask_t *to, int flags)
Christoph Lameterb20a3502006-03-22 00:09:12 -08001152{
1153 return -ENOSYS;
1154}
Christoph Lameter95a402c2006-06-23 02:03:53 -07001155
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001156static struct page *new_page(struct page *page, unsigned long start, int **x)
Christoph Lameter95a402c2006-06-23 02:03:53 -07001157{
1158 return NULL;
1159}
Christoph Lameterb20a3502006-03-22 00:09:12 -08001160#endif
1161
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001162static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001163 unsigned short mode, unsigned short mode_flags,
1164 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001165{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001166 struct mm_struct *mm = current->mm;
1167 struct mempolicy *new;
1168 unsigned long end;
1169 int err;
1170 LIST_HEAD(pagelist);
1171
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001172 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001173 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001174 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001175 return -EPERM;
1176
1177 if (start & ~PAGE_MASK)
1178 return -EINVAL;
1179
1180 if (mode == MPOL_DEFAULT)
1181 flags &= ~MPOL_MF_STRICT;
1182
1183 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1184 end = start + len;
1185
1186 if (end < start)
1187 return -EINVAL;
1188 if (end == start)
1189 return 0;
1190
David Rientjes028fec42008-04-28 02:12:25 -07001191 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001192 if (IS_ERR(new))
1193 return PTR_ERR(new);
1194
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001195 if (flags & MPOL_MF_LAZY)
1196 new->flags |= MPOL_F_MOF;
1197
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001198 /*
1199 * If we are using the default policy then operation
1200 * on discontinuous address spaces is okay after all
1201 */
1202 if (!new)
1203 flags |= MPOL_MF_DISCONTIG_OK;
1204
David Rientjes028fec42008-04-28 02:12:25 -07001205 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1206 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001207 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001208
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001209 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1210
1211 err = migrate_prep();
1212 if (err)
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001213 goto mpol_out;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001214 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001215 {
1216 NODEMASK_SCRATCH(scratch);
1217 if (scratch) {
1218 down_write(&mm->mmap_sem);
1219 task_lock(current);
1220 err = mpol_set_nodemask(new, nmask, scratch);
1221 task_unlock(current);
1222 if (err)
1223 up_write(&mm->mmap_sem);
1224 } else
1225 err = -ENOMEM;
1226 NODEMASK_SCRATCH_FREE(scratch);
1227 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001228 if (err)
1229 goto mpol_out;
1230
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001231 err = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001232 flags | MPOL_MF_INVERT, &pagelist);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001233 if (!err)
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -08001234 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001235
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001236 if (!err) {
1237 int nr_failed = 0;
1238
Minchan Kimcf608ac2010-10-26 14:21:29 -07001239 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001240 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001241 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1242 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001243 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001244 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001245 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001246
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001247 if (nr_failed && (flags & MPOL_MF_STRICT))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001248 err = -EIO;
KOSAKI Motohiroab8a3e12009-10-26 16:49:58 -07001249 } else
Joonsoo Kimb0e5fd72013-12-18 17:08:51 -08001250 putback_movable_pages(&pagelist);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001251
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001252 up_write(&mm->mmap_sem);
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001253 mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001254 mpol_put(new);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001255 return err;
1256}
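/*
 * Descriptive note (summary of do_mbind() above): the helper validates
 * the flags and the page-aligned range, builds the new mempolicy with
 * mpol_new(), contextualizes its nodemask under mmap_sem via
 * mpol_set_nodemask(), walks the range with queue_pages_range(),
 * installs the policy with mbind_range(), and finally migrates any
 * queued pages, returning -EIO under MPOL_MF_STRICT when some pages
 * could not be moved.
 */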
1257
Christoph Lameter39743882006-01-08 01:00:51 -08001258/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001259 * User space interface with variable sized bitmaps for nodelists.
1260 */
1261
1262/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001263static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001264 unsigned long maxnode)
1265{
1266 unsigned long k;
Yisheng Xie2851e3b2018-01-31 16:16:11 -08001267 unsigned long t;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001268 unsigned long nlongs;
1269 unsigned long endmask;
1270
1271 --maxnode;
1272 nodes_clear(*nodes);
1273 if (maxnode == 0 || !nmask)
1274 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001275 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001276 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001277
1278 nlongs = BITS_TO_LONGS(maxnode);
1279 if ((maxnode % BITS_PER_LONG) == 0)
1280 endmask = ~0UL;
1281 else
1282 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1283
Yisheng Xie2851e3b2018-01-31 16:16:11 -08001284 /*
 1285	 * When the user specifies more nodes than supported, just check
 1286	 * if the unsupported part is all zero.
 1287	 *
 1288	 * If maxnode has more longs than MAX_NUMNODES, check the bits in
 1289	 * that area first, then go through the remaining bits, which are
 1290	 * equal to or bigger than MAX_NUMNODES. Otherwise, just check
 1291	 * bits [MAX_NUMNODES, maxnode).
1292 */
Christoph Lameter8bccd852005-10-29 18:16:59 -07001293 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1294 if (nlongs > PAGE_SIZE/sizeof(long))
1295 return -EINVAL;
1296 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
Christoph Lameter8bccd852005-10-29 18:16:59 -07001297 if (get_user(t, nmask + k))
1298 return -EFAULT;
1299 if (k == nlongs - 1) {
1300 if (t & endmask)
1301 return -EINVAL;
1302 } else if (t)
1303 return -EINVAL;
1304 }
1305 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1306 endmask = ~0UL;
1307 }
1308
Yisheng Xie2851e3b2018-01-31 16:16:11 -08001309 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1310 unsigned long valid_mask = endmask;
1311
1312 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1313 if (get_user(t, nmask + nlongs - 1))
1314 return -EFAULT;
1315 if (t & valid_mask)
1316 return -EINVAL;
1317 }
1318
Christoph Lameter8bccd852005-10-29 18:16:59 -07001319 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1320 return -EFAULT;
1321 nodes_addr(*nodes)[nlongs-1] &= endmask;
1322 return 0;
1323}
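/*
 * Worked example of the masking above (illustrative): with
 * BITS_PER_LONG == 64 and a user-supplied maxnode of 17, the decrement
 * leaves 16 usable bits, so nlongs == 1 and
 * endmask == (1UL << 16) - 1 == 0xffff; the final AND clears any stray
 * bits at or above bit 16 in the last copied word.
 */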
1324
1325/* Copy a kernel node mask to user space */
1326static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1327 nodemask_t *nodes)
1328{
1329 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1330 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1331
1332 if (copy > nbytes) {
1333 if (copy > PAGE_SIZE)
1334 return -EINVAL;
1335 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1336 return -EFAULT;
1337 copy = nbytes;
1338 }
1339 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1340}
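/*
 * Worked example (illustrative): for a user-supplied maxnode of 1025,
 * copy starts as ALIGN(1024, 64) / 8 == 128 bytes. On a config where
 * MAX_NUMNODES == 64, nbytes == 8, so bytes 8..127 of the user buffer
 * are zeroed and only the first 8 bytes carry node bits.
 */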
1341
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001342SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
Rasmus Villemoesf7f28ca2014-06-04 16:07:57 -07001343 unsigned long, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001344 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001345{
1346 nodemask_t nodes;
1347 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001348 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001349
David Rientjes028fec42008-04-28 02:12:25 -07001350 mode_flags = mode & MPOL_MODE_FLAGS;
1351 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001352 if (mode >= MPOL_MAX)
1353 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001354 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1355 (mode_flags & MPOL_F_RELATIVE_NODES))
1356 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001357 err = get_nodes(&nodes, nmask, maxnode);
1358 if (err)
1359 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001360 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001361}
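/*
 * Illustrative userspace sketch (not part of this file's build, hence
 * the #if 0): one way to reach sys_mbind() via the mbind(2) wrapper
 * from libnuma's <numaif.h>, which is assumed to provide the MPOL_*
 * constants. The region start must be page aligned, matching the check
 * in do_mbind(); error handling is elided.
 */
#if 0
#include <numaif.h>
#include <stddef.h>

static long bind_region_to_node0(void *addr, size_t len)
{
	unsigned long nodemask = 1UL << 0;	/* node 0 only */

	/* maxnode tells the kernel how many mask bits to read */
	return mbind(addr, len, MPOL_BIND, &nodemask,
		     sizeof(nodemask) * 8, MPOL_MF_STRICT | MPOL_MF_MOVE);
}
#endif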
1362
1363/* Set the process memory policy */
Rasmus Villemoes23c89022014-06-04 16:07:58 -07001364SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001365 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001366{
1367 int err;
1368 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001369 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001370
David Rientjes028fec42008-04-28 02:12:25 -07001371 flags = mode & MPOL_MODE_FLAGS;
1372 mode &= ~MPOL_MODE_FLAGS;
1373 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001374 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001375 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1376 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001377 err = get_nodes(&nodes, nmask, maxnode);
1378 if (err)
1379 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001380 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001381}
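/*
 * Illustrative userspace sketch (excluded with #if 0): the optional
 * mode flags split off above are OR'ed into the mode argument by the
 * caller, e.g. MPOL_INTERLEAVE | MPOL_F_STATIC_NODES. Assumes the
 * MPOL_* constants from libnuma's <numaif.h>.
 */
#if 0
#include <numaif.h>

static long interleave_on_nodes_0_and_1(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	return set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
			     &nodemask, sizeof(nodemask) * 8);
}
#endif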
1382
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001383SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1384 const unsigned long __user *, old_nodes,
1385 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001386{
David Howellsc69e8d92008-11-14 10:39:19 +11001387 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001388 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001389 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001390 nodemask_t task_nodes;
1391 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001392 nodemask_t *old;
1393 nodemask_t *new;
1394 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001395
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001396 if (!scratch)
1397 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001398
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001399 old = &scratch->mask1;
1400 new = &scratch->mask2;
1401
1402 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001403 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001404 goto out;
1405
1406 err = get_nodes(new, new_nodes, maxnode);
1407 if (err)
1408 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001409
1410 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001411 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001412 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001413 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001414 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001415 err = -ESRCH;
1416 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001417 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001418 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001419
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001420 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001421
1422 /*
1423 * Check if this process has the right to modify the specified
1424 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001425 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001426 * userid as the target process.
1427 */
David Howellsc69e8d92008-11-14 10:39:19 +11001428 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001429 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1430 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001431 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001432 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001433 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001434 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001435 }
David Howellsc69e8d92008-11-14 10:39:19 +11001436 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001437
1438 task_nodes = cpuset_mems_allowed(task);
1439 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001440 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001441 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001442 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001443 }
1444
Yisheng Xie4cf24632018-01-31 16:16:15 -08001445 task_nodes = cpuset_mems_allowed(current);
1446 nodes_and(*new, *new, task_nodes);
1447 if (nodes_empty(*new))
Christoph Lameter3268c632012-03-21 16:34:06 -07001448 goto out_put;
Yisheng Xie4cf24632018-01-31 16:16:15 -08001449
1450 nodes_and(*new, *new, node_states[N_MEMORY]);
1451 if (nodes_empty(*new))
1452 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001453
David Quigley86c3a762006-06-23 02:04:02 -07001454 err = security_task_movememory(task);
1455 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001456 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001457
Christoph Lameter3268c632012-03-21 16:34:06 -07001458 mm = get_task_mm(task);
1459 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001460
1461 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001462 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001463 goto out;
1464 }
1465
1466 err = do_migrate_pages(mm, old, new,
1467 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001468
1469 mmput(mm);
1470out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001471 NODEMASK_SCRATCH_FREE(scratch);
1472
Christoph Lameter39743882006-01-08 01:00:51 -08001473 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001474
1475out_put:
1476 put_task_struct(task);
1477 goto out;
1478
Christoph Lameter39743882006-01-08 01:00:51 -08001479}
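/*
 * Illustrative userspace sketch (excluded with #if 0): moving another
 * task's pages from node 0 to node 1 with migrate_pages(2). The caller
 * needs the permissions checked above (matching uid or CAP_SYS_NICE).
 * Assumes libnuma's <numaif.h>.
 */
#if 0
#include <numaif.h>
#include <sys/types.h>

static long move_task_pages_0_to_1(pid_t pid)
{
	unsigned long old_nodes = 1UL << 0;
	unsigned long new_nodes = 1UL << 1;

	return migrate_pages(pid, sizeof(old_nodes) * 8,
			     &old_nodes, &new_nodes);
}
#endif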
1480
1481
Christoph Lameter8bccd852005-10-29 18:16:59 -07001482/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001483SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1484 unsigned long __user *, nmask, unsigned long, maxnode,
1485 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001486{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001487 int err;
1488 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001489 nodemask_t nodes;
1490
1491 if (nmask != NULL && maxnode < MAX_NUMNODES)
1492 return -EINVAL;
1493
1494 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1495
1496 if (err)
1497 return err;
1498
1499 if (policy && put_user(pval, policy))
1500 return -EFAULT;
1501
1502 if (nmask)
1503 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1504
1505 return err;
1506}
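/*
 * Illustrative userspace sketch (excluded with #if 0): querying the
 * policy in force at an address. Per the check above, when a nodemask
 * is requested maxnode must cover at least MAX_NUMNODES bits, so a
 * generously sized buffer is used. Assumes libnuma's <numaif.h>.
 */
#if 0
#include <numaif.h>

static int policy_mode_at(void *addr)
{
	int mode;
	unsigned long nodemask[16] = { 0 };	/* room for 1024 node bits */

	if (get_mempolicy(&mode, nodemask, sizeof(nodemask) * 8,
			  addr, MPOL_F_ADDR))
		return -1;
	return mode;	/* MPOL_DEFAULT, MPOL_BIND, MPOL_INTERLEAVE, ... */
}
#endif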
1507
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508#ifdef CONFIG_COMPAT
1509
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001510COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1511 compat_ulong_t __user *, nmask,
1512 compat_ulong_t, maxnode,
1513 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514{
1515 long err;
1516 unsigned long __user *nm = NULL;
1517 unsigned long nr_bits, alloc_size;
1518 DECLARE_BITMAP(bm, MAX_NUMNODES);
1519
1520 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1521 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1522
1523 if (nmask)
1524 nm = compat_alloc_user_space(alloc_size);
1525
1526 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1527
1528 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001529 unsigned long copy_size;
1530 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1531 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 /* ensure entire bitmap is zeroed */
1533 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1534 err |= compat_put_bitmap(nmask, bm, nr_bits);
1535 }
1536
1537 return err;
1538}
1539
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001540COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1541 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 unsigned long __user *nm = NULL;
1544 unsigned long nr_bits, alloc_size;
1545 DECLARE_BITMAP(bm, MAX_NUMNODES);
1546
1547 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1548 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1549
1550 if (nmask) {
Chris Sallscddab762017-04-07 23:48:11 -07001551 if (compat_get_bitmap(bm, nmask, nr_bits))
1552 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 nm = compat_alloc_user_space(alloc_size);
Chris Sallscddab762017-04-07 23:48:11 -07001554 if (copy_to_user(nm, bm, alloc_size))
1555 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 }
1557
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 return sys_set_mempolicy(mode, nm, nr_bits+1);
1559}
1560
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001561COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1562 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1563 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 unsigned long __user *nm = NULL;
1566 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001567 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
1569 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1570 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1571
1572 if (nmask) {
Chris Sallscddab762017-04-07 23:48:11 -07001573 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1574 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 nm = compat_alloc_user_space(alloc_size);
Chris Sallscddab762017-04-07 23:48:11 -07001576 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1577 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 }
1579
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1581}
1582
1583#endif
1584
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001585struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1586 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587{
Oleg Nesterov8d902742014-10-09 15:27:45 -07001588 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589
1590 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001591 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d902742014-10-09 15:27:45 -07001592 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001593 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001595
1596 /*
1597 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1598 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1599 * count on these policies which will be dropped by
1600 * mpol_cond_put() later
1601 */
1602 if (mpol_needs_cond_ref(pol))
1603 mpol_get(pol);
1604 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001606
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001607 return pol;
1608}
1609
1610/*
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001611 * get_vma_policy(@vma, @addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001612 * @vma: virtual memory area whose policy is sought
1613 * @addr: address in @vma for shared policy lookup
1614 *
1615 * Returns effective policy for a VMA at specified address.
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001616 * Falls back to current->mempolicy or system default policy, as necessary.
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001617 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1618 * count--added by the get_policy() vm_op, as appropriate--to protect against
1619 * freeing by another task. It is the caller's responsibility to free the
1620 * extra reference for shared policies.
1621 */
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001622static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1623 unsigned long addr)
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001624{
1625 struct mempolicy *pol = __get_vma_policy(vma, addr);
1626
Oleg Nesterov8d902742014-10-09 15:27:45 -07001627 if (!pol)
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001628 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001629
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 return pol;
1631}
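/*
 * Illustrative caller pattern (sketch, not a real call site): shared
 * policies come back with an extra reference, so lookups are paired
 * with mpol_cond_put() once the placement decision is made:
 *
 *	pol = get_vma_policy(vma, addr);
 *	... choose node/zonelist from pol ...
 *	mpol_cond_put(pol);
 */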
1632
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001633bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001634{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001635 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001636
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001637 if (vma->vm_ops && vma->vm_ops->get_policy) {
1638 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001639
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001640 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1641 if (pol && (pol->flags & MPOL_F_MOF))
1642 ret = true;
1643 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001644
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001645 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001646 }
1647
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001648 pol = vma->vm_policy;
Oleg Nesterov8d902742014-10-09 15:27:45 -07001649 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001650 pol = get_task_policy(current);
Oleg Nesterov8d902742014-10-09 15:27:45 -07001651
Mel Gormanfc3147242013-10-07 11:29:09 +01001652 return pol->flags & MPOL_F_MOF;
1653}
1654
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001655static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1656{
1657 enum zone_type dynamic_policy_zone = policy_zone;
1658
1659 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1660
1661 /*
 1662	 * If policy->v.nodes has movable memory only,
 1663	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
 1664	 *
 1665	 * policy->v.nodes is intersected with node_states[N_MEMORY],
 1666	 * so if the following test fails, it implies
 1667	 * policy->v.nodes has movable memory only.
1668 */
1669 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1670 dynamic_policy_zone = ZONE_MOVABLE;
1671
1672 return zone >= dynamic_policy_zone;
1673}
1674
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001675/*
1676 * Return a nodemask representing a mempolicy for filtering nodes for
1677 * page allocation
1678 */
1679static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001680{
1681 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001682 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001683 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001684 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1685 return &policy->v.nodes;
1686
1687 return NULL;
1688}
1689
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001690/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001691static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1692 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001694 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001696 if (!(policy->flags & MPOL_F_LOCAL))
1697 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 break;
1699 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001700 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001701 * Normally, MPOL_BIND allocations are node-local within the
1702 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001703 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001704 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001705 */
Mel Gorman19770b32008-04-28 02:12:18 -07001706 if (unlikely(gfp & __GFP_THISNODE) &&
1707 unlikely(!node_isset(nd, policy->v.nodes)))
1708 nd = first_node(policy->v.nodes);
1709 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 BUG();
1712 }
Mel Gorman0e884602008-04-28 02:12:14 -07001713 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714}
1715
1716/* Do dynamic interleaving for a process */
1717static unsigned interleave_nodes(struct mempolicy *policy)
1718{
1719 unsigned nid, next;
1720 struct task_struct *me = current;
1721
1722 nid = me->il_next;
Andrew Morton0edaf862016-05-19 17:10:58 -07001723 next = next_node_in(nid, policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001724 if (next < MAX_NUMNODES)
1725 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 return nid;
1727}
1728
Christoph Lameterdc85da12006-01-18 17:42:36 -08001729/*
1730 * Depending on the memory policy provide a node from which to allocate the
1731 * next slab entry.
1732 */
David Rientjes2a389612014-04-07 15:37:29 -07001733unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001734{
Andi Kleene7b691b2012-06-09 02:40:03 -07001735 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001736 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001737
1738 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001739 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001740
1741 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001742 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001743 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001744
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001745 switch (policy->mode) {
1746 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001747 /*
1748 * handled MPOL_F_LOCAL above
1749 */
1750 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001751
Christoph Lameterdc85da12006-01-18 17:42:36 -08001752 case MPOL_INTERLEAVE:
1753 return interleave_nodes(policy);
1754
Mel Gormandd1a2392008-04-28 02:12:17 -07001755 case MPOL_BIND: {
Mel Gormanc33d6c02016-05-19 17:14:10 -07001756 struct zoneref *z;
1757
Christoph Lameterdc85da12006-01-18 17:42:36 -08001758 /*
1759 * Follow bind policy behavior and start allocation at the
1760 * first node.
1761 */
Mel Gorman19770b32008-04-28 02:12:18 -07001762 struct zonelist *zonelist;
Mel Gorman19770b32008-04-28 02:12:18 -07001763 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
Aneesh Kumar K.Vc9634cf2016-10-07 16:59:12 -07001764 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
Mel Gormanc33d6c02016-05-19 17:14:10 -07001765 z = first_zones_zonelist(zonelist, highest_zoneidx,
1766 &policy->v.nodes);
1767 return z->zone ? z->zone->node : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001768 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001769
Christoph Lameterdc85da12006-01-18 17:42:36 -08001770 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001771 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001772 }
1773}
1774
Andrew Mortonfee83b32016-05-19 17:11:43 -07001775/*
1776 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1777 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1778 * number of present nodes.
1779 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780static unsigned offset_il_node(struct mempolicy *pol,
Andrew Mortonfee83b32016-05-19 17:11:43 -07001781 struct vm_area_struct *vma, unsigned long n)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001783 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001784 unsigned target;
Andrew Mortonfee83b32016-05-19 17:11:43 -07001785 int i;
1786 int nid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
David Rientjesf5b087b2008-04-28 02:12:27 -07001788 if (!nnodes)
1789 return numa_node_id();
Andrew Mortonfee83b32016-05-19 17:11:43 -07001790 target = (unsigned int)n % nnodes;
1791 nid = first_node(pol->v.nodes);
1792 for (i = 0; i < target; i++)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001793 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 return nid;
1795}
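/*
 * Worked example (illustrative): with pol->v.nodes == {0,2,5} and
 * n == 4, nnodes == 3 and target == 4 % 3 == 1; the walk starts at
 * node 0 and advances once, so node 2 is returned.
 */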
1796
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001797/* Determine a node number for interleave */
1798static inline unsigned interleave_nid(struct mempolicy *pol,
1799 struct vm_area_struct *vma, unsigned long addr, int shift)
1800{
1801 if (vma) {
1802 unsigned long off;
1803
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001804 /*
1805 * for small pages, there is no difference between
1806 * shift and PAGE_SHIFT, so the bit-shift is safe.
1807 * for huge pages, since vm_pgoff is in units of small
1808 * pages, we need to shift off the always 0 bits to get
1809 * a useful offset.
1810 */
1811 BUG_ON(shift < PAGE_SHIFT);
1812 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001813 off += (addr - vma->vm_start) >> shift;
1814 return offset_il_node(pol, vma, off);
1815 } else
1816 return interleave_nodes(pol);
1817}
1818
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001819#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001820/*
 1821	 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001822 * @vma: virtual memory area whose policy is sought
1823 * @addr: address in @vma for shared policy lookup and interleave policy
1824 * @gfp_flags: for requested zone
1825 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1826 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001827 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001828 * Returns a zonelist suitable for a huge page allocation and a pointer
1829 * to the struct mempolicy for conditional unref after allocation.
 1830	 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1831 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001832 *
Mel Gormand26914d2014-04-03 14:47:24 -07001833 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001834 */
Mel Gorman396faf02007-07-17 04:03:13 -07001835struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001836 gfp_t gfp_flags, struct mempolicy **mpol,
1837 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001838{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001839 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001840
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001841 *mpol = get_vma_policy(vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001842 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001843
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001844 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1845 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001846 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001847 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001848 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001849 if ((*mpol)->mode == MPOL_BIND)
1850 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001851 }
1852 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001853}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001854
1855/*
1856 * init_nodemask_of_mempolicy
1857 *
1858 * If the current task's mempolicy is "default" [NULL], return 'false'
1859 * to indicate default policy. Otherwise, extract the policy nodemask
1860 * for 'bind' or 'interleave' policy into the argument nodemask, or
1861 * initialize the argument nodemask to contain the single node for
1862 * 'preferred' or 'local' policy and return 'true' to indicate presence
1863 * of non-default mempolicy.
1864 *
1865 * We don't bother with reference counting the mempolicy [mpol_get/put]
 1866	 * because the current task is examining its own mempolicy and a task's
1867 * mempolicy is only ever changed by the task itself.
1868 *
1869 * N.B., it is the caller's responsibility to free a returned nodemask.
1870 */
1871bool init_nodemask_of_mempolicy(nodemask_t *mask)
1872{
1873 struct mempolicy *mempolicy;
1874 int nid;
1875
1876 if (!(mask && current->mempolicy))
1877 return false;
1878
Miao Xiec0ff7452010-05-24 14:32:08 -07001879 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001880 mempolicy = current->mempolicy;
1881 switch (mempolicy->mode) {
1882 case MPOL_PREFERRED:
1883 if (mempolicy->flags & MPOL_F_LOCAL)
1884 nid = numa_node_id();
1885 else
1886 nid = mempolicy->v.preferred_node;
1887 init_nodemask_of_node(mask, nid);
1888 break;
1889
1890 case MPOL_BIND:
1891 /* Fall through */
1892 case MPOL_INTERLEAVE:
1893 *mask = mempolicy->v.nodes;
1894 break;
1895
1896 default:
1897 BUG();
1898 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001899 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001900
1901 return true;
1902}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001903#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001904
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001905/*
1906 * mempolicy_nodemask_intersects
1907 *
1908 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1909 * policy. Otherwise, check for intersection between mask and the policy
 1910	 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1911 * policy, always return true since it may allocate elsewhere on fallback.
1912 *
1913 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1914 */
1915bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1916 const nodemask_t *mask)
1917{
1918 struct mempolicy *mempolicy;
1919 bool ret = true;
1920
1921 if (!mask)
1922 return ret;
1923 task_lock(tsk);
1924 mempolicy = tsk->mempolicy;
1925 if (!mempolicy)
1926 goto out;
1927
1928 switch (mempolicy->mode) {
1929 case MPOL_PREFERRED:
1930 /*
1931 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
 1932	 * allocate from; they may fall back to other nodes under OOM.
1933 * Thus, it's possible for tsk to have allocated memory from
1934 * nodes in mask.
1935 */
1936 break;
1937 case MPOL_BIND:
1938 case MPOL_INTERLEAVE:
1939 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1940 break;
1941 default:
1942 BUG();
1943 }
1944out:
1945 task_unlock(tsk);
1946 return ret;
1947}
1948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949/* Allocate a page in interleaved policy.
1950 Own path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001951static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1952 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953{
1954 struct zonelist *zl;
1955 struct page *page;
1956
Mel Gorman0e884602008-04-28 02:12:14 -07001957 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001959 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001960 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 return page;
1962}
1963
1964/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001965 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 *
1967 * @gfp:
1968 * %GFP_USER user allocation.
1969 * %GFP_KERNEL kernel allocations,
1970 * %GFP_HIGHMEM highmem/user allocations,
1971 * %GFP_FS allocation should not call back into a file system.
1972 * %GFP_ATOMIC don't sleep.
1973 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001974 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 * @vma: Pointer to VMA or NULL if not available.
1976 * @addr: Virtual Address of the allocation. Must be inside the VMA.
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001977 * @node: Which node to prefer for allocation (modulo policy).
1978 * @hugepage: for hugepages try only the preferred node if possible
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 *
1980 * This function allocates a page from the kernel page pool and applies
1981 * a NUMA policy associated with the VMA or the current process.
1982 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1983 * mm_struct of the VMA to prevent it from going away. Should be used for
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001984 * all allocations for pages that will be mapped into user space. Returns
1985 * NULL when no page can be allocated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 */
1987struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08001988alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001989 unsigned long addr, int node, bool hugepage)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990{
Mel Gormancc9a6c82012-03-21 16:34:11 -07001991 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07001992 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07001993 unsigned int cpuset_mems_cookie;
Vlastimil Babkabe97a412015-02-11 15:27:15 -08001994 struct zonelist *zl;
1995 nodemask_t *nmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Mel Gormancc9a6c82012-03-21 16:34:11 -07001997retry_cpuset:
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07001998 pol = get_vma_policy(vma, addr);
Mel Gormand26914d2014-04-03 14:47:24 -07001999 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002000
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002001 if (pol->mode == MPOL_INTERLEAVE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002003
Andi Kleen8eac5632011-02-25 14:44:28 -08002004 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002005 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002006 page = alloc_page_interleave(gfp, order, nid);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002007 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 }
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002009
Vlastimil Babka0867a572015-06-24 16:58:48 -07002010 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2011 int hpage_node = node;
2012
2013 /*
 2014	 * For hugepage allocation and a non-interleave policy that
 2015	 * allows the current node (or other explicitly preferred
 2016	 * node), we only try to allocate from the current/preferred
2017 * node and don't fall back to other nodes, as the cost of
2018 * remote accesses would likely offset THP benefits.
2019 *
2020 * If the policy is interleave, or does not allow the current
2021 * node in its nodemask, we allocate the standard way.
2022 */
2023 if (pol->mode == MPOL_PREFERRED &&
2024 !(pol->flags & MPOL_F_LOCAL))
2025 hpage_node = pol->v.preferred_node;
2026
2027 nmask = policy_nodemask(gfp, pol);
2028 if (!nmask || node_isset(hpage_node, *nmask)) {
2029 mpol_cond_put(pol);
Andrea Arcangeli818e5842018-11-02 15:47:59 -07002030 /*
2031 * We cannot invoke reclaim if __GFP_THISNODE
2032 * is set. Invoking reclaim with
2033 * __GFP_THISNODE set, would cause THP
2034 * allocations to trigger heavy swapping
2035 * despite there may be tons of free memory
2036 * (including potentially plenty of THP
2037 * already available in the buddy) on all the
2038 * other NUMA nodes.
2039 *
2040 * At most we could invoke compaction when
2041 * __GFP_THISNODE is set (but we would need to
2042 * refrain from invoking reclaim even if
2043 * compaction returned COMPACT_SKIPPED because
 2044	 * there wasn't enough memory for compaction
 2045	 * to succeed). For now just avoid
2046 * __GFP_THISNODE instead of limiting the
2047 * allocation path to a strict and single
2048 * compaction invocation.
2049 *
2050 * Supposedly if direct reclaim was enabled by
2051 * the caller, the app prefers THP regardless
2052 * of the node it comes from so this would be
 2053	 * more desirable behavior than only
 2054	 * providing THP originating from the local
 2055	 * node in such a case.
2056 */
2057 if (!(gfp & __GFP_DIRECT_RECLAIM))
2058 gfp |= __GFP_THISNODE;
2059 page = __alloc_pages_node(hpage_node, gfp, order);
Vlastimil Babka0867a572015-06-24 16:58:48 -07002060 goto out;
2061 }
2062 }
2063
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002064 nmask = policy_nodemask(gfp, pol);
2065 zl = policy_zonelist(gfp, pol, node);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002066 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
Vlastimil Babka9b1a1ae2017-01-24 15:18:18 -08002067 mpol_cond_put(pol);
Vlastimil Babkabe97a412015-02-11 15:27:15 -08002068out:
Mel Gormand26914d2014-04-03 14:47:24 -07002069 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002070 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002071 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072}
2073
2074/**
2075 * alloc_pages_current - Allocate pages.
2076 *
2077 * @gfp:
2078 * %GFP_USER user allocation,
2079 * %GFP_KERNEL kernel allocation,
2080 * %GFP_HIGHMEM highmem allocation,
2081 * %GFP_FS don't call back into a file system.
2082 * %GFP_ATOMIC don't sleep.
2083 * @order: Power of two of allocation size in pages. 0 is a single page.
2084 *
 2085	 * Allocate a page from the kernel page pool. When not in
 2086	 * interrupt context, apply the current process' NUMA policy.
2087 * Returns NULL when no page can be allocated.
2088 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002089 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 * 1) it's ok to take cpuset_sem (can WAIT), and
2091 * 2) allocating for current task (not interrupt).
2092 */
Al Virodd0fc662005-10-07 07:46:04 +01002093struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Oleg Nesterov8d902742014-10-09 15:27:45 -07002095 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002096 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002097 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Oleg Nesterov8d902742014-10-09 15:27:45 -07002099 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2100 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002101
Mel Gormancc9a6c82012-03-21 16:34:11 -07002102retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07002103 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002104
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002105 /*
2106 * No reference counting needed for current->mempolicy
2107 * nor system default_policy
2108 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002109 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002110 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2111 else
2112 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002113 policy_zonelist(gfp, pol, numa_node_id()),
2114 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002115
Mel Gormand26914d2014-04-03 14:47:24 -07002116 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002117 goto retry_cpuset;
2118
Miao Xiec0ff7452010-05-24 14:32:08 -07002119 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120}
2121EXPORT_SYMBOL(alloc_pages_current);
2122
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002123int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2124{
2125 struct mempolicy *pol = mpol_dup(vma_policy(src));
2126
2127 if (IS_ERR(pol))
2128 return PTR_ERR(pol);
2129 dst->vm_policy = pol;
2130 return 0;
2131}
2132
Paul Jackson42253992006-01-08 01:01:59 -08002133/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002134 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002135 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2136 * with the mems_allowed returned by cpuset_mems_allowed(). This
2137 * keeps mempolicies cpuset relative after its cpuset moves. See
2138 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002139 *
 2140	 * current's mempolicy may be rebound by another task (the task that changes
 2141	 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002142 */
Paul Jackson42253992006-01-08 01:01:59 -08002143
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002144/* Slow path of a mempolicy duplicate */
2145struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146{
2147 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2148
2149 if (!new)
2150 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002151
2152 /* task's mempolicy is protected by alloc_lock */
2153 if (old == current->mempolicy) {
2154 task_lock(current);
2155 *new = *old;
2156 task_unlock(current);
2157 } else
2158 *new = *old;
2159
Paul Jackson42253992006-01-08 01:01:59 -08002160 if (current_cpuset_is_being_rebound()) {
2161 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002162 if (new->flags & MPOL_F_REBINDING)
2163 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2164 else
2165 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002166 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 return new;
2169}
2170
2171/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002172bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173{
2174 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002175 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002176 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002177 return false;
Bob Liu19800502010-05-24 14:32:01 -07002178 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002179 return false;
Bob Liu19800502010-05-24 14:32:01 -07002180 if (mpol_store_user_nodemask(a))
2181 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002182 return false;
Bob Liu19800502010-05-24 14:32:01 -07002183
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002184 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002185 case MPOL_BIND:
2186 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002188 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 case MPOL_PREFERRED:
Yisheng Xiebe1a9d12018-03-22 16:17:02 -07002190 /* a's ->flags is the same as b's */
2191 if (a->flags & MPOL_F_LOCAL)
2192 return true;
Namhyung Kim75719662011-03-22 16:33:02 -07002193 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 default:
2195 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002196 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 }
2198}
2199
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 * Shared memory backing store policy support.
2202 *
2203 * Remember policies even when nobody has shared memory mapped.
2204 * The policies are kept in Red-Black tree linked from the inode.
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002205 * They are protected by the sp->lock rwlock, which should be held
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 * for any accesses to the tree.
2207 */
2208
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002209/*
2210 * lookup first element intersecting start-end. Caller holds sp->lock for
2211 * reading or for writing
2212 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213static struct sp_node *
2214sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2215{
2216 struct rb_node *n = sp->root.rb_node;
2217
2218 while (n) {
2219 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2220
2221 if (start >= p->end)
2222 n = n->rb_right;
2223 else if (end <= p->start)
2224 n = n->rb_left;
2225 else
2226 break;
2227 }
2228 if (!n)
2229 return NULL;
2230 for (;;) {
2231 struct sp_node *w = NULL;
2232 struct rb_node *prev = rb_prev(n);
2233 if (!prev)
2234 break;
2235 w = rb_entry(prev, struct sp_node, nd);
2236 if (w->end <= start)
2237 break;
2238 n = prev;
2239 }
2240 return rb_entry(n, struct sp_node, nd);
2241}
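/*
 * Worked example (illustrative): with ranges [0,4) and [4,8) in the
 * tree, sp_lookup(sp, 2, 6) first descends to any node overlapping
 * [2,6), then walks rb_prev() while the predecessor still overlaps,
 * returning the leftmost intersecting node, here [0,4).
 */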
2242
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002243/*
2244 * Insert a new shared policy into the list. Caller holds sp->lock for
2245 * writing.
2246 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2248{
2249 struct rb_node **p = &sp->root.rb_node;
2250 struct rb_node *parent = NULL;
2251 struct sp_node *nd;
2252
2253 while (*p) {
2254 parent = *p;
2255 nd = rb_entry(parent, struct sp_node, nd);
2256 if (new->start < nd->start)
2257 p = &(*p)->rb_left;
2258 else if (new->end > nd->end)
2259 p = &(*p)->rb_right;
2260 else
2261 BUG();
2262 }
2263 rb_link_node(&new->nd, parent, p);
2264 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002265 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002266 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267}
2268
2269/* Find shared policy intersecting idx */
2270struct mempolicy *
2271mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2272{
2273 struct mempolicy *pol = NULL;
2274 struct sp_node *sn;
2275
2276 if (!sp->root.rb_node)
2277 return NULL;
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002278 read_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 sn = sp_lookup(sp, idx, idx+1);
2280 if (sn) {
2281 mpol_get(sn->policy);
2282 pol = sn->policy;
2283 }
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002284 read_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 return pol;
2286}
2287
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002288static void sp_free(struct sp_node *n)
2289{
2290 mpol_put(n->policy);
2291 kmem_cache_free(sn_cache, n);
2292}
2293
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002294/**
2295 * mpol_misplaced - check whether current page node is valid in policy
2296 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002297 * @page: page to be checked
2298 * @vma: vm area where page mapped
2299 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002300 *
 2301	 * Look up the current policy node id for vma,addr and compare it
 2302	 * to the page's node id.
2303 *
2304 * Returns:
2305 * -1 - not misplaced, page is in the right node
2306 * node - node id where the page should be
2307 *
2308 * Policy determination "mimics" alloc_page_vma().
2309 * Called from fault path where we know the vma and faulting address.
2310 */
2311int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2312{
2313 struct mempolicy *pol;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002314 struct zoneref *z;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002315 int curnid = page_to_nid(page);
2316 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002317 int thiscpu = raw_smp_processor_id();
2318 int thisnid = cpu_to_node(thiscpu);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002319 int polnid = -1;
2320 int ret = -1;
2321
2322 BUG_ON(!vma);
2323
Oleg Nesterovdd6eecb2014-10-09 15:27:57 -07002324 pol = get_vma_policy(vma, addr);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002325 if (!(pol->flags & MPOL_F_MOF))
2326 goto out;
2327
2328 switch (pol->mode) {
2329 case MPOL_INTERLEAVE:
2330 BUG_ON(addr >= vma->vm_end);
2331 BUG_ON(addr < vma->vm_start);
2332
2333 pgoff = vma->vm_pgoff;
2334 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2335 polnid = offset_il_node(pol, vma, pgoff);
2336 break;
2337
2338 case MPOL_PREFERRED:
2339 if (pol->flags & MPOL_F_LOCAL)
2340 polnid = numa_node_id();
2341 else
2342 polnid = pol->v.preferred_node;
2343 break;
2344
2345 case MPOL_BIND:
Mel Gormanc33d6c02016-05-19 17:14:10 -07002346
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002347 /*
2348 * allows binding to multiple nodes.
2349 * use current page if in policy nodemask,
2350 * else select nearest allowed node, if any.
2351 * If no allowed nodes, use current [!misplaced].
2352 */
2353 if (node_isset(curnid, pol->v.nodes))
2354 goto out;
Mel Gormanc33d6c02016-05-19 17:14:10 -07002355 z = first_zones_zonelist(
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002356 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2357 gfp_zone(GFP_HIGHUSER),
Mel Gormanc33d6c02016-05-19 17:14:10 -07002358 &pol->v.nodes);
2359 polnid = z->zone->node;
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002360 break;
2361
2362 default:
2363 BUG();
2364 }
Mel Gorman5606e382012-11-02 18:19:13 +00002365
2366 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002367 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002368 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002369
Rik van Riel10f39042014-01-27 17:03:44 -05002370 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002371 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002372 }
2373
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002374 if (curnid != polnid)
2375 ret = polnid;
2376out:
2377 mpol_cond_put(pol);
2378
2379 return ret;
2380}
2381
David Rientjesc11600e2016-09-01 16:15:07 -07002382/*
2383 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2384 * dropped after task->mempolicy is set to NULL so that any allocation done as
2385 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2386 * policy.
2387 */
2388void mpol_put_task_policy(struct task_struct *task)
2389{
2390 struct mempolicy *pol;
2391
2392 task_lock(task);
2393 pol = task->mempolicy;
2394 task->mempolicy = NULL;
2395 task_unlock(task);
2396 mpol_put(pol);
2397}
2398
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2400{
Paul Mundt140d5a42007-07-15 23:38:16 -07002401	pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002403 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404}
2405
Mel Gorman42288fe2012-12-21 23:10:25 +00002406static void sp_node_init(struct sp_node *node, unsigned long start,
2407 unsigned long end, struct mempolicy *pol)
2408{
2409 node->start = start;
2410 node->end = end;
2411 node->policy = pol;
2412}
2413
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002414static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2415 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416{
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002417 struct sp_node *n;
2418 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002420 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002421 if (!n)
2422 return NULL;
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002423
2424 newpol = mpol_dup(pol);
2425 if (IS_ERR(newpol)) {
2426 kmem_cache_free(sn_cache, n);
2427 return NULL;
2428 }
2429 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002430 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f22012-10-08 16:29:16 -07002431
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 return n;
2433}
2434
2435/* Replace a policy range. */
2436static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2437 unsigned long end, struct sp_node *new)
2438{
Mel Gormanb22d1272012-10-08 16:29:17 -07002439 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002440 struct sp_node *n_new = NULL;
2441 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002442 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443
Mel Gorman42288fe2012-12-21 23:10:25 +00002444restart:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002445 write_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 n = sp_lookup(sp, start, end);
2447 /* Take care of old policies in the same range. */
2448 while (n && n->start < end) {
2449 struct rb_node *next = rb_next(&n->nd);
2450 if (n->start >= start) {
2451 if (n->end <= end)
2452 sp_delete(sp, n);
2453 else
2454 n->start = end;
2455 } else {
2456 /* Old policy spanning whole new range. */
2457 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002458 if (!n_new)
2459 goto alloc_new;
2460
2461 *mpol_new = *n->policy;
2462 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002463 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002465 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002466 n_new = NULL;
2467 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 break;
2469 } else
2470 n->end = start;
2471 }
2472 if (!next)
2473 break;
2474 n = rb_entry(next, struct sp_node, nd);
2475 }
2476 if (new)
2477 sp_insert(sp, new);
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002478 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002479 ret = 0;
2480
2481err_out:
2482 if (mpol_new)
2483 mpol_put(mpol_new);
2484 if (n_new)
2485 kmem_cache_free(sn_cache, n_new);
2486
Mel Gormanb22d1272012-10-08 16:29:17 -07002487 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002488
2489alloc_new:
Nathan Zimmer4a8c7bb2016-01-14 15:18:36 -08002490 write_unlock(&sp->lock);
Mel Gorman42288fe2012-12-21 23:10:25 +00002491 ret = -ENOMEM;
2492 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2493 if (!n_new)
2494 goto err_out;
2495 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2496 if (!mpol_new)
2497 goto err_out;
2498 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499}

/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called from get_inode(), so GFP_KERNEL allocations are safe here.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}
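
/*
 * Illustrative path into the function above: @mpol typically originates
 * from a tmpfs "mpol=" mount option parsed by mpol_parse_str(), e.g.
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *
 * which installs an interleave policy over nodes 0-3 as the shared
 * policy for files on that mount.
 */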

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff + sz, new);
	if (err && new)
		sp_free(new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;

	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
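
/*
 * Example (illustrative): booting with "numa_balancing=disable" sets
 * numabalancing_override to -1, so check_numabalancing_enable() switches
 * the feature off regardless of CONFIG_NUMA_BALANCING_DEFAULT_ENABLED.
 */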
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}
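
/*
 * Illustrative arithmetic for the interleave threshold above, assuming a
 * 4 KiB PAGE_SIZE (PAGE_SHIFT == 12): a node with 4096 present pages has
 * 4096 << 12 == 16 << 20 bytes (16 MiB), making it the smallest node that
 * still joins interleave_nodes.
 */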

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with the
 * MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT] = "default",
	[MPOL_PREFERRED] = "prefer",
	[MPOL_BIND] = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL] = "local",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * Returns 0 on success, 1 on failure.
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode < MPOL_MAX; mode++) {
		if (!strcmp(str, policy_modes[mode]))
			break;
	}
	if (mode >= MPOL_MAX)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist; mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
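
/*
 * Illustrative inputs accepted by mpol_parse_str() (assuming the named
 * nodes are online with memory):
 *
 *	"interleave:0-3"  -> MPOL_INTERLEAVE over nodes 0-3
 *	"bind=static:1,3" -> MPOL_BIND with MPOL_F_STATIC_NODES on nodes 1,3
 *	"prefer:2"        -> MPOL_PREFERRED with preferred_node == 2
 *	"local"           -> MPOL_PREFERRED with MPOL_F_LOCAL set
 */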
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * A @maxlen of at least 32 is recommended: enough for the longest mode,
 * "interleave", the longest flag, "relative", and a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}
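
/*
 * Example outputs (illustrative): "default", "prefer:2",
 * "interleave=relative:0-3", "bind=static:1,3". These mirror the strings
 * mpol_parse_str() accepts for the tmpfs "mpol=" mount option.
 */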