/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
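
/*
 * For reference, a sketch of how userspace drives the policies described
 * above, through the libnuma <numaif.h> wrappers around set_mempolicy(2)
 * and mbind(2). This is illustrative only (error handling elided) and not
 * part of this file's build:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	size_t len = 16 * 4096;
 *
 *	// Interleave future allocations of this process over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	// Bind one mapping to node 0 only, with no fallback.
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	mbind(p, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */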

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step clears all the disallowed nodes. This way we avoid ending
	 * up with no node to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 * MPOL_REBIND_ONCE  - do the rebind work at once
	 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

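/*
 * For MPOL_F_RELATIVE_NODES: remap the user's node numbers onto the
 * nodes actually allowed in @rel.  nodes_fold() first wraps @orig around
 * the weight of @rel, then nodes_onto() maps bit n of the result onto
 * the n-th set bit of @rel.  E.g. with @rel = {4,6}, a relative mask of
 * {0} resolves to {4} and {1} resolves to {6}.
 */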
static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some sanity checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

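/*
 * Worked example of the rebind flag semantics implemented below (for
 * illustration): suppose a policy was created with user nodemask {0,1}
 * while the cpuset allowed {0,1}, and the cpuset is then rebound to
 * {1,2}.  With MPOL_F_STATIC_NODES the effective mask becomes the plain
 * intersection {0,1} & {1,2} = {1}.  With MPOL_F_RELATIVE_NODES the
 * positions 0 and 1 are taken relative to the new set, giving {1,2}.
 * With neither flag, the old nodes are remapped one-to-one onto the new
 * set, also giving {1,2}.
 */
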
/*
 * step:
 * MPOL_REBIND_ONCE  - do the rebind work at once
 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps.  The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes.  This way we avoid ending up with no node to
 * allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 * MPOL_REBIND_ONCE  - do the rebind work at once
 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/*
 * Scan through pages, checking if they satisfy the required conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
		void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
	entry = huge_ptep_get((pte_t *)pmd);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
}

static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued onto the pagelist
 * passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err = 0;
	struct vm_area_struct *vma, *prev;

	vma = find_vma(mm, start);
	if (!vma)
		return -EFAULT;
	prev = NULL;
	for (; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return -EFAULT;
			if (prev && prev->vm_end < vma->vm_start)
				return -EFAULT;
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = queue_pages_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err)
				break;
		}
next:
		prev = vma;
	}
	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
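
/*
 * For reference, the query side as userspace sees it (illustrative sketch
 * only, via the libnuma <numaif.h> wrapper around get_mempolicy(2)):
 *
 *	int mode;
 *	unsigned long mask = 0;
 *	if (get_mempolicy(&mode, &mask, sizeof(mask) * 8,
 *			  addr, MPOL_F_ADDR) == 0) {
 *		// mode is the policy in force at 'addr' (e.g. MPOL_BIND)
 *		// and 'mask' holds that policy's nodemask.
 *	}
 */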

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	err = migrate_vmas(mm, from, to, flags);
	if (err)
		goto out;

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
out:
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

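/*
 * For reference, do_migrate_pages() above backs the migrate_pages(2)
 * syscall; an illustrative userspace sketch via the libnuma <numaif.h>
 * wrapper:
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	// Move the target pid's pages from node 0 to node 1.
 *	long ret = migrate_pages(pid, sizeof(from) * 8, &from, &to);
 *	// ret < 0 is an error; ret > 0 is the number of pages not moved.
 */
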
/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001200static long do_mbind(unsigned long start, unsigned long len,
David Rientjes028fec42008-04-28 02:12:25 -07001201 unsigned short mode, unsigned short mode_flags,
1202 nodemask_t *nmask, unsigned long flags)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001203{
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001204 struct mm_struct *mm = current->mm;
1205 struct mempolicy *new;
1206 unsigned long end;
1207 int err;
1208 LIST_HEAD(pagelist);
1209
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001210 if (flags & ~(unsigned long)MPOL_MF_VALID)
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001211 return -EINVAL;
Christoph Lameter74c00242006-03-14 19:50:21 -08001212 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001213 return -EPERM;
1214
1215 if (start & ~PAGE_MASK)
1216 return -EINVAL;
1217
1218 if (mode == MPOL_DEFAULT)
1219 flags &= ~MPOL_MF_STRICT;
1220
1221 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1222 end = start + len;
1223
1224 if (end < start)
1225 return -EINVAL;
1226 if (end == start)
1227 return 0;
1228
David Rientjes028fec42008-04-28 02:12:25 -07001229 new = mpol_new(mode, mode_flags, nmask);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001230 if (IS_ERR(new))
1231 return PTR_ERR(new);
1232
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001233 if (flags & MPOL_MF_LAZY)
1234 new->flags |= MPOL_F_MOF;
1235
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001236 /*
1237 * If we are using the default policy then operation
1238 * on discontinuous address spaces is okay after all
1239 */
1240 if (!new)
1241 flags |= MPOL_MF_DISCONTIG_OK;
1242
David Rientjes028fec42008-04-28 02:12:25 -07001243 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1244 start, start + len, mode, mode_flags,
David Rientjes00ef2d22013-02-22 16:35:36 -08001245 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001246
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001247 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1248
1249 err = migrate_prep();
1250 if (err)
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001251 goto mpol_out;
Christoph Lameter0aedadf2008-11-06 12:53:30 -08001252 }
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07001253 {
1254 NODEMASK_SCRATCH(scratch);
1255 if (scratch) {
1256 down_write(&mm->mmap_sem);
1257 task_lock(current);
1258 err = mpol_set_nodemask(new, nmask, scratch);
1259 task_unlock(current);
1260 if (err)
1261 up_write(&mm->mmap_sem);
1262 } else
1263 err = -ENOMEM;
1264 NODEMASK_SCRATCH_FREE(scratch);
1265 }
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001266 if (err)
1267 goto mpol_out;
1268
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001269 err = queue_pages_range(mm, start, end, nmask,
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001270 flags | MPOL_MF_INVERT, &pagelist);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001271 if (!err)
KOSAKI Motohiro9d8cebd2010-03-05 13:41:57 -08001272 err = mbind_range(mm, start, end, new);
Christoph Lameter7e2ab152006-02-01 03:05:40 -08001273
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001274 if (!err) {
1275 int nr_failed = 0;
1276
Minchan Kimcf608ac2010-10-26 14:21:29 -07001277 if (!list_empty(&pagelist)) {
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001278 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
Hugh Dickinsd05f0cdcb2014-06-23 13:22:07 -07001279 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1280 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001281 if (nr_failed)
Naoya Horiguchi74060e42013-09-11 14:22:06 -07001282 putback_movable_pages(&pagelist);
Minchan Kimcf608ac2010-10-26 14:21:29 -07001283 }
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001284
Lee Schermerhornb24f53a2012-10-25 14:16:32 +02001285 if (nr_failed && (flags & MPOL_MF_STRICT))
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001286 err = -EIO;
KOSAKI Motohiroab8a3e12009-10-26 16:49:58 -07001287 } else
Joonsoo Kimb0e5fd72013-12-18 17:08:51 -08001288 putback_movable_pages(&pagelist);
Christoph Lameterb20a3502006-03-22 00:09:12 -08001289
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001290 up_write(&mm->mmap_sem);
KOSAKI Motohirob05ca732009-10-26 16:49:59 -07001291 mpol_out:
Lee Schermerhornf0be3d32008-04-28 02:13:08 -07001292 mpol_put(new);
Christoph Lameter6ce3c4c2006-01-08 01:01:04 -08001293 return err;
1294}
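
/*
 * E.g. the rounding in do_mbind() above: with 4KB pages, start =
 * 0x1000 and len = 5000 yield len = 8192 and end = 0x3000, so the
 * installed policy always covers whole pages.
 */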
1295
Christoph Lameter39743882006-01-08 01:00:51 -08001296/*
Christoph Lameter8bccd852005-10-29 18:16:59 -07001297 * User space interface with variable sized bitmaps for nodelists.
1298 */
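
/*
 * For example, a hypothetical userspace caller binding a region to
 * nodes 0 and 2 might pass:
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 2);
 *	mbind(addr, len, MPOL_BIND, &nodemask, 4, MPOL_MF_MOVE);
 *
 * get_nodes() below drops one from maxnode, so passing 4 makes bits
 * 0..2 of the mask significant.
 */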
1299
1300/* Copy a node mask from user space. */
Christoph Lameter39743882006-01-08 01:00:51 -08001301static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
Christoph Lameter8bccd852005-10-29 18:16:59 -07001302 unsigned long maxnode)
1303{
1304 unsigned long k;
1305 unsigned long nlongs;
1306 unsigned long endmask;
1307
1308 --maxnode;
1309 nodes_clear(*nodes);
1310 if (maxnode == 0 || !nmask)
1311 return 0;
Andi Kleena9c930b2006-02-20 18:27:59 -08001312 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
Chris Wright636f13c2006-02-17 13:59:36 -08001313 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001314
1315 nlongs = BITS_TO_LONGS(maxnode);
1316 if ((maxnode % BITS_PER_LONG) == 0)
1317 endmask = ~0UL;
1318 else
1319 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1320
 1321 /* When the user specifies more nodes than supported, just check
 1322 that the unsupported part is all zero. */
1323 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1324 if (nlongs > PAGE_SIZE/sizeof(long))
1325 return -EINVAL;
1326 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1327 unsigned long t;
1328 if (get_user(t, nmask + k))
1329 return -EFAULT;
1330 if (k == nlongs - 1) {
1331 if (t & endmask)
1332 return -EINVAL;
1333 } else if (t)
1334 return -EINVAL;
1335 }
1336 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1337 endmask = ~0UL;
1338 }
1339
1340 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1341 return -EFAULT;
1342 nodes_addr(*nodes)[nlongs-1] &= endmask;
1343 return 0;
1344}
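
/*
 * Worked example: a userspace maxnode of 5 leaves 4 significant
 * bits after the decrement, so endmask = (1UL << 4) - 1 = 0xf and
 * any higher bits in the copied word are silently cleared by the
 * final "&= endmask". The -EINVAL paths only trigger when the user
 * mask extends beyond MAX_NUMNODES with non-zero bits in the excess.
 */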
1345
1346/* Copy a kernel node mask to user space */
1347static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1348 nodemask_t *nodes)
1349{
1350 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1351 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1352
1353 if (copy > nbytes) {
1354 if (copy > PAGE_SIZE)
1355 return -EINVAL;
1356 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1357 return -EFAULT;
1358 copy = nbytes;
1359 }
1360 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1361}
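
/*
 * E.g. on a 64-bit kernel with MAX_NUMNODES = 1024 (nbytes = 128)
 * and a caller passing maxnode = 2048: copy = ALIGN(2047, 64) / 8 =
 * 256 bytes, so the first 128 bytes come from the kernel nodemask
 * and the remaining 128 are cleared, leaving the user buffer fully
 * defined.
 */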
1362
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001363SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
Rasmus Villemoesf7f28ca2014-06-04 16:07:57 -07001364 unsigned long, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001365 unsigned long, maxnode, unsigned, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001366{
1367 nodemask_t nodes;
1368 int err;
David Rientjes028fec42008-04-28 02:12:25 -07001369 unsigned short mode_flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001370
David Rientjes028fec42008-04-28 02:12:25 -07001371 mode_flags = mode & MPOL_MODE_FLAGS;
1372 mode &= ~MPOL_MODE_FLAGS;
David Rientjesa3b51e02008-04-28 02:12:23 -07001373 if (mode >= MPOL_MAX)
1374 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001375 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1376 (mode_flags & MPOL_F_RELATIVE_NODES))
1377 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001378 err = get_nodes(&nodes, nmask, maxnode);
1379 if (err)
1380 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001381 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001382}
1383
1384/* Set the process memory policy */
Rasmus Villemoes23c89022014-06-04 16:07:58 -07001385SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001386 unsigned long, maxnode)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001387{
1388 int err;
1389 nodemask_t nodes;
David Rientjes028fec42008-04-28 02:12:25 -07001390 unsigned short flags;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001391
David Rientjes028fec42008-04-28 02:12:25 -07001392 flags = mode & MPOL_MODE_FLAGS;
1393 mode &= ~MPOL_MODE_FLAGS;
1394 if ((unsigned int)mode >= MPOL_MAX)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001395 return -EINVAL;
David Rientjes4c50bc02008-04-28 02:12:30 -07001396 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1397 return -EINVAL;
Christoph Lameter8bccd852005-10-29 18:16:59 -07001398 err = get_nodes(&nodes, nmask, maxnode);
1399 if (err)
1400 return err;
David Rientjes028fec42008-04-28 02:12:25 -07001401 return do_set_mempolicy(mode, flags, &nodes);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001402}
1403
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001404SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1405 const unsigned long __user *, old_nodes,
1406 const unsigned long __user *, new_nodes)
Christoph Lameter39743882006-01-08 01:00:51 -08001407{
David Howellsc69e8d92008-11-14 10:39:19 +11001408 const struct cred *cred = current_cred(), *tcred;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001409 struct mm_struct *mm = NULL;
Christoph Lameter39743882006-01-08 01:00:51 -08001410 struct task_struct *task;
Christoph Lameter39743882006-01-08 01:00:51 -08001411 nodemask_t task_nodes;
1412 int err;
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001413 nodemask_t *old;
1414 nodemask_t *new;
1415 NODEMASK_SCRATCH(scratch);
Christoph Lameter39743882006-01-08 01:00:51 -08001416
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001417 if (!scratch)
1418 return -ENOMEM;
Christoph Lameter39743882006-01-08 01:00:51 -08001419
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001420 old = &scratch->mask1;
1421 new = &scratch->mask2;
1422
1423 err = get_nodes(old, old_nodes, maxnode);
Christoph Lameter39743882006-01-08 01:00:51 -08001424 if (err)
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001425 goto out;
1426
1427 err = get_nodes(new, new_nodes, maxnode);
1428 if (err)
1429 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001430
1431 /* Find the mm_struct */
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001432 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001433 task = pid ? find_task_by_vpid(pid) : current;
Christoph Lameter39743882006-01-08 01:00:51 -08001434 if (!task) {
Zeng Zhaoming55cfaa32010-12-02 14:31:13 -08001435 rcu_read_unlock();
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001436 err = -ESRCH;
1437 goto out;
Christoph Lameter39743882006-01-08 01:00:51 -08001438 }
Christoph Lameter3268c632012-03-21 16:34:06 -07001439 get_task_struct(task);
Christoph Lameter39743882006-01-08 01:00:51 -08001440
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001441 err = -EINVAL;
Christoph Lameter39743882006-01-08 01:00:51 -08001442
1443 /*
1444 * Check if this process has the right to modify the specified
1445 * process. The right exists if the process has administrative
Alexey Dobriyan7f927fc2006-03-28 01:56:53 -08001446 * capabilities, superuser privileges or the same
Christoph Lameter39743882006-01-08 01:00:51 -08001447 * userid as the target process.
1448 */
David Howellsc69e8d92008-11-14 10:39:19 +11001449 tcred = __task_cred(task);
Eric W. Biedermanb38a86e2012-03-12 15:48:24 -07001450 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1451 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
Christoph Lameter74c00242006-03-14 19:50:21 -08001452 !capable(CAP_SYS_NICE)) {
David Howellsc69e8d92008-11-14 10:39:19 +11001453 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001454 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001455 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001456 }
David Howellsc69e8d92008-11-14 10:39:19 +11001457 rcu_read_unlock();
Christoph Lameter39743882006-01-08 01:00:51 -08001458
1459 task_nodes = cpuset_mems_allowed(task);
1460 /* Is the user allowed to access the target nodes? */
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001461 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
Christoph Lameter39743882006-01-08 01:00:51 -08001462 err = -EPERM;
Christoph Lameter3268c632012-03-21 16:34:06 -07001463 goto out_put;
Christoph Lameter39743882006-01-08 01:00:51 -08001464 }
1465
Lai Jiangshan01f13bd2012-12-12 13:51:33 -08001466 if (!nodes_subset(*new, node_states[N_MEMORY])) {
Christoph Lameter3b42d282007-08-31 00:12:08 -07001467 err = -EINVAL;
Christoph Lameter3268c632012-03-21 16:34:06 -07001468 goto out_put;
Christoph Lameter3b42d282007-08-31 00:12:08 -07001469 }
1470
David Quigley86c3a762006-06-23 02:04:02 -07001471 err = security_task_movememory(task);
1472 if (err)
Christoph Lameter3268c632012-03-21 16:34:06 -07001473 goto out_put;
David Quigley86c3a762006-06-23 02:04:02 -07001474
Christoph Lameter3268c632012-03-21 16:34:06 -07001475 mm = get_task_mm(task);
1476 put_task_struct(task);
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001477
1478 if (!mm) {
Christoph Lameter3268c632012-03-21 16:34:06 -07001479 err = -EINVAL;
Sasha Levinf2a9ef82012-04-25 16:01:52 -07001480 goto out;
1481 }
1482
1483 err = do_migrate_pages(mm, old, new,
1484 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
Christoph Lameter3268c632012-03-21 16:34:06 -07001485
1486 mmput(mm);
1487out:
KOSAKI Motohiro596d7cf2010-08-09 17:19:01 -07001488 NODEMASK_SCRATCH_FREE(scratch);
1489
Christoph Lameter39743882006-01-08 01:00:51 -08001490 return err;
Christoph Lameter3268c632012-03-21 16:34:06 -07001491
1492out_put:
1493 put_task_struct(task);
1494 goto out;
1495
Christoph Lameter39743882006-01-08 01:00:51 -08001496}
1497
1498
Christoph Lameter8bccd852005-10-29 18:16:59 -07001499/* Retrieve NUMA policy */
Heiko Carstens938bb9f2009-01-14 14:14:30 +01001500SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1501 unsigned long __user *, nmask, unsigned long, maxnode,
1502 unsigned long, addr, unsigned long, flags)
Christoph Lameter8bccd852005-10-29 18:16:59 -07001503{
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07001504 int err;
1505 int uninitialized_var(pval);
Christoph Lameter8bccd852005-10-29 18:16:59 -07001506 nodemask_t nodes;
1507
1508 if (nmask != NULL && maxnode < MAX_NUMNODES)
1509 return -EINVAL;
1510
1511 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1512
1513 if (err)
1514 return err;
1515
1516 if (policy && put_user(pval, policy))
1517 return -EFAULT;
1518
1519 if (nmask)
1520 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1521
1522 return err;
1523}
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525#ifdef CONFIG_COMPAT
1526
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001527COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1528 compat_ulong_t __user *, nmask,
1529 compat_ulong_t, maxnode,
1530 compat_ulong_t, addr, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531{
1532 long err;
1533 unsigned long __user *nm = NULL;
1534 unsigned long nr_bits, alloc_size;
1535 DECLARE_BITMAP(bm, MAX_NUMNODES);
1536
1537 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1538 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1539
1540 if (nmask)
1541 nm = compat_alloc_user_space(alloc_size);
1542
1543 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1544
1545 if (!err && nmask) {
KAMEZAWA Hiroyuki2bbff6c2011-09-14 16:21:02 -07001546 unsigned long copy_size;
1547 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1548 err = copy_from_user(bm, nm, copy_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 /* ensure entire bitmap is zeroed */
1550 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1551 err |= compat_put_bitmap(nmask, bm, nr_bits);
1552 }
1553
1554 return err;
1555}
1556
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001557COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1558 compat_ulong_t, maxnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559{
1560 long err = 0;
1561 unsigned long __user *nm = NULL;
1562 unsigned long nr_bits, alloc_size;
1563 DECLARE_BITMAP(bm, MAX_NUMNODES);
1564
1565 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1566 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1567
1568 if (nmask) {
1569 err = compat_get_bitmap(bm, nmask, nr_bits);
1570 nm = compat_alloc_user_space(alloc_size);
1571 err |= copy_to_user(nm, bm, alloc_size);
1572 }
1573
1574 if (err)
1575 return -EFAULT;
1576
1577 return sys_set_mempolicy(mode, nm, nr_bits+1);
1578}
1579
Heiko Carstensc93e0f62014-03-03 16:32:26 +01001580COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1581 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1582 compat_ulong_t, maxnode, compat_ulong_t, flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583{
1584 long err = 0;
1585 unsigned long __user *nm = NULL;
1586 unsigned long nr_bits, alloc_size;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001587 nodemask_t bm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588
1589 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1590 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1591
1592 if (nmask) {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001593 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 nm = compat_alloc_user_space(alloc_size);
Andi Kleendfcd3c02005-10-29 18:15:48 -07001595 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 }
1597
1598 if (err)
1599 return -EFAULT;
1600
1601 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1602}
1603
1604#endif
1605
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001606struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1607 unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608{
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001609 struct mempolicy *pol = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
1611 if (vma) {
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001612 if (vma->vm_ops && vma->vm_ops->get_policy) {
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001613 pol = vma->vm_ops->get_policy(vma, addr);
Mel Gorman00442ad2012-10-08 16:29:20 -07001614 } else if (vma->vm_policy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615 pol = vma->vm_policy;
Mel Gorman00442ad2012-10-08 16:29:20 -07001616
1617 /*
1618 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1619 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1620 * count on these policies which will be dropped by
1621 * mpol_cond_put() later
1622 */
1623 if (mpol_needs_cond_ref(pol))
1624 mpol_get(pol);
1625 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 }
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001627
Oleg Nesterov74d2c3a2014-10-09 15:27:50 -07001628 return pol;
1629}
1630
1631/*
1632 * get_vma_policy(@task, @vma, @addr)
1633 * @task: task for fallback if vma policy == default
1634 * @vma: virtual memory area whose policy is sought
1635 * @addr: address in @vma for shared policy lookup
1636 *
1637 * Returns effective policy for a VMA at specified address.
1638 * Falls back to @task or system default policy, as necessary.
1639 * Current or other task's task mempolicy and non-shared vma policies must be
1640 * protected by task_lock(task) by the caller.
1641 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1642 * count--added by the get_policy() vm_op, as appropriate--to protect against
1643 * freeing by another task. It is the caller's responsibility to free the
1644 * extra reference for shared policies.
1645 */
1646struct mempolicy *get_vma_policy(struct task_struct *task,
1647 struct vm_area_struct *vma, unsigned long addr)
1648{
1649 struct mempolicy *pol = __get_vma_policy(vma, addr);
1650
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001651 if (!pol)
1652 pol = get_task_policy(task);
1653
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 return pol;
1655}
1656
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001657bool vma_policy_mof(struct vm_area_struct *vma)
Mel Gormanfc3147242013-10-07 11:29:09 +01001658{
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001659 struct mempolicy *pol;
Oleg Nesterovf15ca782014-10-09 15:27:43 -07001660
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001661 if (vma->vm_ops && vma->vm_ops->get_policy) {
1662 bool ret = false;
Mel Gormanfc3147242013-10-07 11:29:09 +01001663
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001664 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1665 if (pol && (pol->flags & MPOL_F_MOF))
1666 ret = true;
1667 mpol_cond_put(pol);
Mel Gormanfc3147242013-10-07 11:29:09 +01001668
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001669 return ret;
Mel Gormanfc3147242013-10-07 11:29:09 +01001670 }
1671
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001672 pol = vma->vm_policy;
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001673 if (!pol)
Oleg Nesterov6b6482b2014-10-09 15:27:48 -07001674 pol = get_task_policy(current);
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07001675
Mel Gormanfc3147242013-10-07 11:29:09 +01001676 return pol->flags & MPOL_F_MOF;
1677}
1678
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001679static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1680{
1681 enum zone_type dynamic_policy_zone = policy_zone;
1682
1683 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1684
1685 /*
 1686 * If policy->v.nodes has movable memory only,
 1687 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
 1688 *
 1689 * policy->v.nodes has already been intersected with node_states[N_MEMORY],
 1690 * so if the following test fails, it implies that
 1691 * policy->v.nodes contains movable memory only.
1692 */
1693 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1694 dynamic_policy_zone = ZONE_MOVABLE;
1695
1696 return zone >= dynamic_policy_zone;
1697}
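
/*
 * Concrete case: if a policy binds only nodes whose memory is all
 * ZONE_MOVABLE, the intersection with N_HIGH_MEMORY is empty and
 * dynamic_policy_zone becomes ZONE_MOVABLE, so only allocations
 * whose gfp maps to ZONE_MOVABLE (e.g. GFP_HIGHUSER_MOVABLE) have
 * the policy applied; lower-zone allocations fall back to default.
 */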
1698
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001699/*
1700 * Return a nodemask representing a mempolicy for filtering nodes for
1701 * page allocation
1702 */
1703static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
Mel Gorman19770b32008-04-28 02:12:18 -07001704{
1705 /* Lower zones don't get a nodemask applied for MPOL_BIND */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001706 if (unlikely(policy->mode == MPOL_BIND) &&
Lai Jiangshand3eb1572013-02-22 16:33:22 -08001707 apply_policy_zone(policy, gfp_zone(gfp)) &&
Mel Gorman19770b32008-04-28 02:12:18 -07001708 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1709 return &policy->v.nodes;
1710
1711 return NULL;
1712}
1713
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001714/* Return a zonelist indicated by gfp for node representing a mempolicy */
Andi Kleen2f5f9482011-03-04 17:36:29 -08001715static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1716 int nd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717{
Lee Schermerhorn45c47452008-04-28 02:13:12 -07001718 switch (policy->mode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001720 if (!(policy->flags & MPOL_F_LOCAL))
1721 nd = policy->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 break;
1723 case MPOL_BIND:
Mel Gorman19770b32008-04-28 02:12:18 -07001724 /*
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001725 * Normally, MPOL_BIND allocations are node-local within the
1726 * allowed nodemask. However, if __GFP_THISNODE is set and the
Bob Liu6eb27e12010-05-24 14:32:00 -07001727 * current node isn't part of the mask, we use the zonelist for
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001728 * the first node in the mask instead.
Mel Gorman19770b32008-04-28 02:12:18 -07001729 */
Mel Gorman19770b32008-04-28 02:12:18 -07001730 if (unlikely(gfp & __GFP_THISNODE) &&
1731 unlikely(!node_isset(nd, policy->v.nodes)))
1732 nd = first_node(policy->v.nodes);
1733 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 BUG();
1736 }
Mel Gorman0e884602008-04-28 02:12:14 -07001737 return node_zonelist(nd, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738}
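
/*
 * Example: under MPOL_BIND to nodes {1,2}, a __GFP_THISNODE
 * allocation attempted on node 0 cannot satisfy both constraints,
 * so the MPOL_BIND case above redirects it to
 * first_node(policy->v.nodes) == 1 instead of node 0's zonelist.
 */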
1739
1740/* Do dynamic interleaving for a process */
1741static unsigned interleave_nodes(struct mempolicy *policy)
1742{
1743 unsigned nid, next;
1744 struct task_struct *me = current;
1745
1746 nid = me->il_next;
Andi Kleendfcd3c02005-10-29 18:15:48 -07001747 next = next_node(nid, policy->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 if (next >= MAX_NUMNODES)
Andi Kleendfcd3c02005-10-29 18:15:48 -07001749 next = first_node(policy->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001750 if (next < MAX_NUMNODES)
1751 me->il_next = next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 return nid;
1753}
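
/*
 * Worked example: with an interleave nodemask of {0,2,5} and
 * il_next == 2, this returns 2 and advances il_next to 5; after
 * node 5 the next_node() lookup runs off the mask and il_next
 * wraps back to first_node(), i.e. 0.
 */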
1754
Christoph Lameterdc85da12006-01-18 17:42:36 -08001755/*
1756 * Depending on the memory policy provide a node from which to allocate the
1757 * next slab entry.
1758 */
David Rientjes2a389612014-04-07 15:37:29 -07001759unsigned int mempolicy_slab_node(void)
Christoph Lameterdc85da12006-01-18 17:42:36 -08001760{
Andi Kleene7b691b2012-06-09 02:40:03 -07001761 struct mempolicy *policy;
David Rientjes2a389612014-04-07 15:37:29 -07001762 int node = numa_mem_id();
Andi Kleene7b691b2012-06-09 02:40:03 -07001763
1764 if (in_interrupt())
David Rientjes2a389612014-04-07 15:37:29 -07001765 return node;
Andi Kleene7b691b2012-06-09 02:40:03 -07001766
1767 policy = current->mempolicy;
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001768 if (!policy || policy->flags & MPOL_F_LOCAL)
David Rientjes2a389612014-04-07 15:37:29 -07001769 return node;
Christoph Lameter765c4502006-09-27 01:50:08 -07001770
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001771 switch (policy->mode) {
1772 case MPOL_PREFERRED:
Lee Schermerhornfc36b8d2008-04-28 02:13:21 -07001773 /*
1774 * handled MPOL_F_LOCAL above
1775 */
1776 return policy->v.preferred_node;
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001777
Christoph Lameterdc85da12006-01-18 17:42:36 -08001778 case MPOL_INTERLEAVE:
1779 return interleave_nodes(policy);
1780
Mel Gormandd1a2392008-04-28 02:12:17 -07001781 case MPOL_BIND: {
Christoph Lameterdc85da12006-01-18 17:42:36 -08001782 /*
1783 * Follow bind policy behavior and start allocation at the
1784 * first node.
1785 */
Mel Gorman19770b32008-04-28 02:12:18 -07001786 struct zonelist *zonelist;
1787 struct zone *zone;
1788 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
David Rientjes2a389612014-04-07 15:37:29 -07001789 zonelist = &NODE_DATA(node)->node_zonelists[0];
Mel Gorman19770b32008-04-28 02:12:18 -07001790 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1791 &policy->v.nodes,
1792 &zone);
David Rientjes2a389612014-04-07 15:37:29 -07001793 return zone ? zone->node : node;
Mel Gormandd1a2392008-04-28 02:12:17 -07001794 }
Christoph Lameterdc85da12006-01-18 17:42:36 -08001795
Christoph Lameterdc85da12006-01-18 17:42:36 -08001796 default:
Lee Schermerhornbea904d2008-04-28 02:13:18 -07001797 BUG();
Christoph Lameterdc85da12006-01-18 17:42:36 -08001798 }
1799}
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801/* Do static interleaving for a VMA with known offset. */
1802static unsigned offset_il_node(struct mempolicy *pol,
1803 struct vm_area_struct *vma, unsigned long off)
1804{
Andi Kleendfcd3c02005-10-29 18:15:48 -07001805 unsigned nnodes = nodes_weight(pol->v.nodes);
David Rientjesf5b087b2008-04-28 02:12:27 -07001806 unsigned target;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 int c;
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001808 int nid = NUMA_NO_NODE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
David Rientjesf5b087b2008-04-28 02:12:27 -07001810 if (!nnodes)
1811 return numa_node_id();
1812 target = (unsigned int)off % nnodes;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 c = 0;
1814 do {
Andi Kleendfcd3c02005-10-29 18:15:48 -07001815 nid = next_node(nid, pol->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 c++;
1817 } while (c <= target);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 return nid;
1819}
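
/*
 * Worked example: for nodemask {0,2,5} (nnodes = 3) and off = 7,
 * target = 7 % 3 = 1, so the loop takes two next_node() steps from
 * NUMA_NO_NODE and returns node 2, the second set bit of the mask.
 */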
1820
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001821/* Determine a node number for interleave */
1822static inline unsigned interleave_nid(struct mempolicy *pol,
1823 struct vm_area_struct *vma, unsigned long addr, int shift)
1824{
1825 if (vma) {
1826 unsigned long off;
1827
Nishanth Aravamudan3b98b082006-08-31 21:27:53 -07001828 /*
1829 * for small pages, there is no difference between
1830 * shift and PAGE_SHIFT, so the bit-shift is safe.
1831 * for huge pages, since vm_pgoff is in units of small
1832 * pages, we need to shift off the always 0 bits to get
1833 * a useful offset.
1834 */
1835 BUG_ON(shift < PAGE_SHIFT);
1836 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001837 off += (addr - vma->vm_start) >> shift;
1838 return offset_il_node(pol, vma, off);
1839 } else
1840 return interleave_nodes(pol);
1841}
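
/*
 * E.g. for a 2MB huge page mapping (shift = 21, 4KB base pages) with
 * vm_pgoff = 0 and a fault 4MB past vm_start: off = 4MB >> 21 = 2,
 * so offset_il_node() places this huge page on the (2 % nnodes)-th
 * set node of the interleave mask.
 */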
1842
Michal Hocko778d3b02011-07-26 16:08:30 -07001843/*
1844 * Return the bit number of a random bit set in the nodemask.
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001845 * (returns NUMA_NO_NODE if nodemask is empty)
Michal Hocko778d3b02011-07-26 16:08:30 -07001846 */
1847int node_random(const nodemask_t *maskp)
1848{
Jianguo Wub76ac7e2013-11-12 15:07:39 -08001849 int w, bit = NUMA_NO_NODE;
Michal Hocko778d3b02011-07-26 16:08:30 -07001850
1851 w = nodes_weight(*maskp);
1852 if (w)
1853 bit = bitmap_ord_to_pos(maskp->bits,
1854 get_random_int() % w, MAX_NUMNODES);
1855 return bit;
1856}
1857
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001858#ifdef CONFIG_HUGETLBFS
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001859/*
1860 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
Fabian Frederickb46e14a2014-06-04 16:08:18 -07001861 * @vma: virtual memory area whose policy is sought
1862 * @addr: address in @vma for shared policy lookup and interleave policy
1863 * @gfp_flags: for requested zone
1864 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1865 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001866 *
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001867 * Returns a zonelist suitable for a huge page allocation and a pointer
1868 * to the struct mempolicy for conditional unref after allocation.
1869 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
1870 * @nodemask for filtering the zonelist.
Miao Xiec0ff7452010-05-24 14:32:08 -07001871 *
Mel Gormand26914d2014-04-03 14:47:24 -07001872 * Must be protected by read_mems_allowed_begin()
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001873 */
Mel Gorman396faf02007-07-17 04:03:13 -07001874struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
Mel Gorman19770b32008-04-28 02:12:18 -07001875 gfp_t gfp_flags, struct mempolicy **mpol,
1876 nodemask_t **nodemask)
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001877{
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001878 struct zonelist *zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001879
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001880 *mpol = get_vma_policy(current, vma, addr);
Mel Gorman19770b32008-04-28 02:12:18 -07001881 *nodemask = NULL; /* assume !MPOL_BIND */
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001882
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001883 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1884 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
Andi Kleena5516432008-07-23 21:27:41 -07001885 huge_page_shift(hstate_vma(vma))), gfp_flags);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001886 } else {
Andi Kleen2f5f9482011-03-04 17:36:29 -08001887 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07001888 if ((*mpol)->mode == MPOL_BIND)
1889 *nodemask = &(*mpol)->v.nodes;
Lee Schermerhorn480eccf2007-09-18 22:46:47 -07001890 }
1891 return zl;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001892}
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001893
1894/*
1895 * init_nodemask_of_mempolicy
1896 *
1897 * If the current task's mempolicy is "default" [NULL], return 'false'
1898 * to indicate default policy. Otherwise, extract the policy nodemask
1899 * for 'bind' or 'interleave' policy into the argument nodemask, or
1900 * initialize the argument nodemask to contain the single node for
1901 * 'preferred' or 'local' policy and return 'true' to indicate presence
1902 * of non-default mempolicy.
1903 *
1904 * We don't bother with reference counting the mempolicy [mpol_get/put]
 1905 * because the current task is examining its own mempolicy and a task's
1906 * mempolicy is only ever changed by the task itself.
1907 *
1908 * N.B., it is the caller's responsibility to free a returned nodemask.
1909 */
1910bool init_nodemask_of_mempolicy(nodemask_t *mask)
1911{
1912 struct mempolicy *mempolicy;
1913 int nid;
1914
1915 if (!(mask && current->mempolicy))
1916 return false;
1917
Miao Xiec0ff7452010-05-24 14:32:08 -07001918 task_lock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001919 mempolicy = current->mempolicy;
1920 switch (mempolicy->mode) {
1921 case MPOL_PREFERRED:
1922 if (mempolicy->flags & MPOL_F_LOCAL)
1923 nid = numa_node_id();
1924 else
1925 nid = mempolicy->v.preferred_node;
1926 init_nodemask_of_node(mask, nid);
1927 break;
1928
1929 case MPOL_BIND:
1930 /* Fall through */
1931 case MPOL_INTERLEAVE:
1932 *mask = mempolicy->v.nodes;
1933 break;
1934
1935 default:
1936 BUG();
1937 }
Miao Xiec0ff7452010-05-24 14:32:08 -07001938 task_unlock(current);
Lee Schermerhorn06808b02009-12-14 17:58:21 -08001939
1940 return true;
1941}
Chen, Kenneth W00ac59a2006-02-03 21:51:14 +01001942#endif
Christoph Lameter5da7ca82006-01-06 00:10:46 -08001943
David Rientjes6f48d0eb2010-08-09 17:18:52 -07001944/*
1945 * mempolicy_nodemask_intersects
1946 *
1947 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1948 * policy. Otherwise, check for intersection between mask and the policy
 1949 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1950 * policy, always return true since it may allocate elsewhere on fallback.
1951 *
1952 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1953 */
1954bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1955 const nodemask_t *mask)
1956{
1957 struct mempolicy *mempolicy;
1958 bool ret = true;
1959
1960 if (!mask)
1961 return ret;
1962 task_lock(tsk);
1963 mempolicy = tsk->mempolicy;
1964 if (!mempolicy)
1965 goto out;
1966
1967 switch (mempolicy->mode) {
1968 case MPOL_PREFERRED:
1969 /*
1970 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1971 * allocate from, they may fallback to other nodes when oom.
1972 * Thus, it's possible for tsk to have allocated memory from
1973 * nodes in mask.
1974 */
1975 break;
1976 case MPOL_BIND:
1977 case MPOL_INTERLEAVE:
1978 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1979 break;
1980 default:
1981 BUG();
1982 }
1983out:
1984 task_unlock(tsk);
1985 return ret;
1986}
1987
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988/* Allocate a page under the interleave policy.
 1989 A separate path because it needs to do special accounting. */
Andi Kleen662f3a02005-10-29 18:15:49 -07001990static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1991 unsigned nid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992{
1993 struct zonelist *zl;
1994 struct page *page;
1995
Mel Gorman0e884602008-04-28 02:12:14 -07001996 zl = node_zonelist(nid, gfp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 page = __alloc_pages(gfp, order, zl);
Mel Gormandd1a2392008-04-28 02:12:17 -07001998 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
Christoph Lameterca889e62006-06-30 01:55:44 -07001999 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 return page;
2001}
2002
2003/**
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002004 * alloc_pages_vma - Allocate a page for a VMA.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 *
2006 * @gfp:
2007 * %GFP_USER user allocation.
2008 * %GFP_KERNEL kernel allocations,
2009 * %GFP_HIGHMEM highmem/user allocations,
2010 * %GFP_FS allocation should not call back into a file system.
2011 * %GFP_ATOMIC don't sleep.
2012 *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002013 * @order:Order of the GFP allocation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 * @vma: Pointer to VMA or NULL if not available.
2015 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2016 *
2017 * This function allocates a page from the kernel page pool and applies
2018 * a NUMA policy associated with the VMA or the current process.
2019 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
2020 * mm_struct of the VMA to prevent it from going away. Should be used for
2021 * all allocations for pages that will be mapped into
2022 * user space. Returns NULL when no page can be allocated.
2023 *
 2024 * Should be called with the mmap_sem of the vma held.
2025 */
2026struct page *
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002027alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
Andi Kleen2f5f9482011-03-04 17:36:29 -08002028 unsigned long addr, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029{
Mel Gormancc9a6c82012-03-21 16:34:11 -07002030 struct mempolicy *pol;
Miao Xiec0ff7452010-05-24 14:32:08 -07002031 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002032 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
Mel Gormancc9a6c82012-03-21 16:34:11 -07002034retry_cpuset:
2035 pol = get_vma_policy(current, vma, addr);
Mel Gormand26914d2014-04-03 14:47:24 -07002036 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002037
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002038 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 unsigned nid;
Christoph Lameter5da7ca82006-01-06 00:10:46 -08002040
Andi Kleen8eac5632011-02-25 14:44:28 -08002041 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002042 mpol_cond_put(pol);
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002043 page = alloc_page_interleave(gfp, order, nid);
Mel Gormand26914d2014-04-03 14:47:24 -07002044 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002045 goto retry_cpuset;
2046
Miao Xiec0ff7452010-05-24 14:32:08 -07002047 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
David Rientjes212a0a62012-12-11 16:02:51 -08002049 page = __alloc_pages_nodemask(gfp, order,
2050 policy_zonelist(gfp, pol, node),
Andrea Arcangeli0bbbc0b2011-01-13 15:47:05 -08002051 policy_nodemask(gfp, pol));
Oleg Nesterov23867402014-10-09 15:27:41 -07002052 mpol_cond_put(pol);
Mel Gormand26914d2014-04-03 14:47:24 -07002053 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002054 goto retry_cpuset;
Miao Xiec0ff7452010-05-24 14:32:08 -07002055 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056}
2057
2058/**
2059 * alloc_pages_current - Allocate pages.
2060 *
2061 * @gfp:
2062 * %GFP_USER user allocation,
2063 * %GFP_KERNEL kernel allocation,
2064 * %GFP_HIGHMEM highmem allocation,
2065 * %GFP_FS don't call back into a file system.
2066 * %GFP_ATOMIC don't sleep.
2067 * @order: Power of two of allocation size in pages. 0 is a single page.
2068 *
 2069 * Allocate a page from the kernel page pool and, when not in
 2070 * interrupt context, apply the current process' NUMA policy.
2071 * Returns NULL when no page can be allocated.
2072 *
Paul Jacksoncf2a473c2006-01-08 01:01:54 -08002073 * Don't call cpuset_update_task_memory_state() unless
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 * 1) it's ok to take cpuset_sem (can WAIT), and
2075 * 2) allocating for current task (not interrupt).
2076 */
Al Virodd0fc662005-10-07 07:46:04 +01002077struct page *alloc_pages_current(gfp_t gfp, unsigned order)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078{
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07002079 struct mempolicy *pol = &default_policy;
Miao Xiec0ff7452010-05-24 14:32:08 -07002080 struct page *page;
Mel Gormancc9a6c82012-03-21 16:34:11 -07002081 unsigned int cpuset_mems_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
Oleg Nesterov8d90274b2014-10-09 15:27:45 -07002083 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2084 pol = get_task_policy(current);
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002085
Mel Gormancc9a6c82012-03-21 16:34:11 -07002086retry_cpuset:
Mel Gormand26914d2014-04-03 14:47:24 -07002087 cpuset_mems_cookie = read_mems_allowed_begin();
Mel Gormancc9a6c82012-03-21 16:34:11 -07002088
Lee Schermerhorn52cd3b02008-04-28 02:13:16 -07002089 /*
2090 * No reference counting needed for current->mempolicy
2091 * nor system default_policy
2092 */
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002093 if (pol->mode == MPOL_INTERLEAVE)
Miao Xiec0ff7452010-05-24 14:32:08 -07002094 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2095 else
2096 page = __alloc_pages_nodemask(gfp, order,
Andi Kleen5c4b4be2011-03-04 17:36:32 -08002097 policy_zonelist(gfp, pol, numa_node_id()),
2098 policy_nodemask(gfp, pol));
Mel Gormancc9a6c82012-03-21 16:34:11 -07002099
Mel Gormand26914d2014-04-03 14:47:24 -07002100 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
Mel Gormancc9a6c82012-03-21 16:34:11 -07002101 goto retry_cpuset;
2102
Miao Xiec0ff7452010-05-24 14:32:08 -07002103 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104}
2105EXPORT_SYMBOL(alloc_pages_current);
2106
Oleg Nesterovef0855d2013-09-11 14:20:14 -07002107int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2108{
2109 struct mempolicy *pol = mpol_dup(vma_policy(src));
2110
2111 if (IS_ERR(pol))
2112 return PTR_ERR(pol);
2113 dst->vm_policy = pol;
2114 return 0;
2115}
2116
Paul Jackson42253992006-01-08 01:01:59 -08002117/*
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002118 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
Paul Jackson42253992006-01-08 01:01:59 -08002119 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2120 * with the mems_allowed returned by cpuset_mems_allowed(). This
2121 * keeps mempolicies cpuset relative after its cpuset moves. See
2122 * further kernel/cpuset.c update_nodemask().
Miao Xie708c1bb2010-05-24 14:32:07 -07002123 *
 2124 * current's mempolicy may be rebound by another task (the task that changes
 2125 * the cpuset's mems), so we needn't do rebind work for the current task.
Paul Jackson42253992006-01-08 01:01:59 -08002126 */
Paul Jackson42253992006-01-08 01:01:59 -08002127
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002128/* Slow path of a mempolicy duplicate */
2129struct mempolicy *__mpol_dup(struct mempolicy *old)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130{
2131 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2132
2133 if (!new)
2134 return ERR_PTR(-ENOMEM);
Miao Xie708c1bb2010-05-24 14:32:07 -07002135
2136 /* task's mempolicy is protected by alloc_lock */
2137 if (old == current->mempolicy) {
2138 task_lock(current);
2139 *new = *old;
2140 task_unlock(current);
2141 } else
2142 *new = *old;
2143
Paul Jackson42253992006-01-08 01:01:59 -08002144 if (current_cpuset_is_being_rebound()) {
2145 nodemask_t mems = cpuset_mems_allowed(current);
Miao Xie708c1bb2010-05-24 14:32:07 -07002146 if (new->flags & MPOL_F_REBINDING)
2147 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2148 else
2149 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
Paul Jackson42253992006-01-08 01:01:59 -08002150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 atomic_set(&new->refcnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152 return new;
2153}
2154
2155/* Slow path of a mempolicy comparison */
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002156bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157{
2158 if (!a || !b)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002159 return false;
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002160 if (a->mode != b->mode)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002161 return false;
Bob Liu19800502010-05-24 14:32:01 -07002162 if (a->flags != b->flags)
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002163 return false;
Bob Liu19800502010-05-24 14:32:01 -07002164 if (mpol_store_user_nodemask(a))
2165 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002166 return false;
Bob Liu19800502010-05-24 14:32:01 -07002167
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002168 switch (a->mode) {
Mel Gorman19770b32008-04-28 02:12:18 -07002169 case MPOL_BIND:
2170 /* Fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 case MPOL_INTERLEAVE:
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002172 return !!nodes_equal(a->v.nodes, b->v.nodes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 case MPOL_PREFERRED:
Namhyung Kim75719662011-03-22 16:33:02 -07002174 return a->v.preferred_node == b->v.preferred_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 default:
2176 BUG();
KOSAKI Motohirofcfb4dc2012-01-10 15:08:21 -08002177 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 }
2179}
2180
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 * Shared memory backing store policy support.
2183 *
2184 * Remember policies even when nobody has shared memory mapped.
2185 * The policies are kept in Red-Black tree linked from the inode.
2186 * They are protected by the sp->lock spinlock, which should be held
2187 * for any accesses to the tree.
2188 */
2189
2190/* lookup first element intersecting start-end */
Mel Gorman42288fe2012-12-21 23:10:25 +00002191/* Caller holds sp->lock */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192static struct sp_node *
2193sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2194{
2195 struct rb_node *n = sp->root.rb_node;
2196
2197 while (n) {
2198 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2199
2200 if (start >= p->end)
2201 n = n->rb_right;
2202 else if (end <= p->start)
2203 n = n->rb_left;
2204 else
2205 break;
2206 }
2207 if (!n)
2208 return NULL;
2209 for (;;) {
2210 struct sp_node *w = NULL;
2211 struct rb_node *prev = rb_prev(n);
2212 if (!prev)
2213 break;
2214 w = rb_entry(prev, struct sp_node, nd);
2215 if (w->end <= start)
2216 break;
2217 n = prev;
2218 }
2219 return rb_entry(n, struct sp_node, nd);
2220}
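
/*
 * E.g. with stored ranges [2,4) and [4,8), sp_lookup(sp, 3, 6) may
 * first land on [4,8) during the rb walk; the backward loop above
 * then rewinds to [2,4), the lowest range still intersecting [3,6).
 */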
2221
2222/* Insert a new shared policy into the list. */
2223/* Caller holds sp->lock */
2224static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2225{
2226 struct rb_node **p = &sp->root.rb_node;
2227 struct rb_node *parent = NULL;
2228 struct sp_node *nd;
2229
2230 while (*p) {
2231 parent = *p;
2232 nd = rb_entry(parent, struct sp_node, nd);
2233 if (new->start < nd->start)
2234 p = &(*p)->rb_left;
2235 else if (new->end > nd->end)
2236 p = &(*p)->rb_right;
2237 else
2238 BUG();
2239 }
2240 rb_link_node(&new->nd, parent, p);
2241 rb_insert_color(&new->nd, &sp->root);
Paul Mundt140d5a42007-07-15 23:38:16 -07002242 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
Lee Schermerhorn45c47452008-04-28 02:13:12 -07002243 new->policy ? new->policy->mode : 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244}
2245
2246/* Find shared policy intersecting idx */
2247struct mempolicy *
2248mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2249{
2250 struct mempolicy *pol = NULL;
2251 struct sp_node *sn;
2252
2253 if (!sp->root.rb_node)
2254 return NULL;
Mel Gorman42288fe2012-12-21 23:10:25 +00002255 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 sn = sp_lookup(sp, idx, idx+1);
2257 if (sn) {
2258 mpol_get(sn->policy);
2259 pol = sn->policy;
2260 }
Mel Gorman42288fe2012-12-21 23:10:25 +00002261 spin_unlock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 return pol;
2263}
2264
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002265static void sp_free(struct sp_node *n)
2266{
2267 mpol_put(n->policy);
2268 kmem_cache_free(sn_cache, n);
2269}
2270
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002271/**
2272 * mpol_misplaced - check whether current page node is valid in policy
2273 *
Fabian Frederickb46e14a2014-06-04 16:08:18 -07002274 * @page: page to be checked
2275 * @vma: vm area where page mapped
2276 * @addr: virtual address where page mapped
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002277 *
2278 * Lookup current policy node id for vma,addr and "compare to" page's
2279 * node id.
2280 *
2281 * Returns:
2282 * -1 - not misplaced, page is in the right node
2283 * node - node id where the page should be
2284 *
2285 * Policy determination "mimics" alloc_page_vma().
2286 * Called from fault path where we know the vma and faulting address.
2287 */
2288int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2289{
2290 struct mempolicy *pol;
2291 struct zone *zone;
2292 int curnid = page_to_nid(page);
2293 unsigned long pgoff;
Peter Zijlstra90572892013-10-07 11:29:20 +01002294 int thiscpu = raw_smp_processor_id();
2295 int thisnid = cpu_to_node(thiscpu);
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002296 int polnid = -1;
2297 int ret = -1;
2298
2299 BUG_ON(!vma);
2300
2301 pol = get_vma_policy(current, vma, addr);
2302 if (!(pol->flags & MPOL_F_MOF))
2303 goto out;
2304
2305 switch (pol->mode) {
2306 case MPOL_INTERLEAVE:
2307 BUG_ON(addr >= vma->vm_end);
2308 BUG_ON(addr < vma->vm_start);
2309
2310 pgoff = vma->vm_pgoff;
2311 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2312 polnid = offset_il_node(pol, vma, pgoff);
2313 break;
2314
2315 case MPOL_PREFERRED:
2316 if (pol->flags & MPOL_F_LOCAL)
2317 polnid = numa_node_id();
2318 else
2319 polnid = pol->v.preferred_node;
2320 break;
2321
2322 case MPOL_BIND:
2323 /*
2324 * allows binding to multiple nodes.
2325 * use current page if in policy nodemask,
2326 * else select nearest allowed node, if any.
2327 * If no allowed nodes, use current [!misplaced].
2328 */
2329 if (node_isset(curnid, pol->v.nodes))
2330 goto out;
2331 (void)first_zones_zonelist(
2332 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2333 gfp_zone(GFP_HIGHUSER),
2334 &pol->v.nodes, &zone);
2335 polnid = zone->node;
2336 break;
2337
2338 default:
2339 BUG();
2340 }
Mel Gorman5606e382012-11-02 18:19:13 +00002341
2342 /* Migrate the page towards the node whose CPU is referencing it */
Mel Gormane42c8ff2012-11-12 09:17:07 +00002343 if (pol->flags & MPOL_F_MORON) {
Peter Zijlstra90572892013-10-07 11:29:20 +01002344 polnid = thisnid;
Mel Gorman5606e382012-11-02 18:19:13 +00002345
Rik van Riel10f39042014-01-27 17:03:44 -05002346 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
Rik van Rielde1c9ce2013-10-07 11:29:39 +01002347 goto out;
Mel Gormane42c8ff2012-11-12 09:17:07 +00002348 }
2349
Lee Schermerhorn771fb4d2012-10-25 14:16:30 +02002350 if (curnid != polnid)
2351 ret = polnid;
2352out:
2353 mpol_cond_put(pol);
2354
2355 return ret;
2356}
2357
Linus Torvalds1da177e2005-04-16 15:20:36 -07002358static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2359{
Paul Mundt140d5a42007-07-15 23:38:16 -07002360 pr_debug("deleting %lx-%lx\n", n->start, n->end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 rb_erase(&n->nd, &sp->root);
KOSAKI Motohiro63f74ca2012-10-08 16:29:19 -07002362 sp_free(n);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002363}
2364
Mel Gorman42288fe2012-12-21 23:10:25 +00002365static void sp_node_init(struct sp_node *node, unsigned long start,
2366 unsigned long end, struct mempolicy *pol)
2367{
2368 node->start = start;
2369 node->end = end;
2370 node->policy = pol;
2371}
2372
Adrian Bunkdbcb0f12007-10-16 01:26:26 -07002373static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2374 struct mempolicy *pol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375{
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002376 struct sp_node *n;
2377 struct mempolicy *newpol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002379 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 if (!n)
2381 return NULL;
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002382
2383 newpol = mpol_dup(pol);
2384 if (IS_ERR(newpol)) {
2385 kmem_cache_free(sn_cache, n);
2386 return NULL;
2387 }
2388 newpol->flags |= MPOL_F_SHARED;
Mel Gorman42288fe2012-12-21 23:10:25 +00002389 sp_node_init(n, start, end, newpol);
KOSAKI Motohiro869833f2012-10-08 16:29:16 -07002390
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391 return n;
2392}
2393
2394/* Replace a policy range. */
2395static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2396 unsigned long end, struct sp_node *new)
2397{
Mel Gormanb22d1272012-10-08 16:29:17 -07002398 struct sp_node *n;
Mel Gorman42288fe2012-12-21 23:10:25 +00002399 struct sp_node *n_new = NULL;
2400 struct mempolicy *mpol_new = NULL;
Mel Gormanb22d1272012-10-08 16:29:17 -07002401 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
Mel Gorman42288fe2012-12-21 23:10:25 +00002403restart:
2404 spin_lock(&sp->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 n = sp_lookup(sp, start, end);
2406 /* Take care of old policies in the same range. */
2407 while (n && n->start < end) {
2408 struct rb_node *next = rb_next(&n->nd);
2409 if (n->start >= start) {
2410 if (n->end <= end)
2411 sp_delete(sp, n);
2412 else
2413 n->start = end;
2414 } else {
2415 /* Old policy spanning whole new range. */
2416 if (n->end > end) {
Mel Gorman42288fe2012-12-21 23:10:25 +00002417 if (!n_new)
2418 goto alloc_new;
2419
2420 *mpol_new = *n->policy;
2421 atomic_set(&mpol_new->refcnt, 1);
KOSAKI Motohiro78806392013-03-08 12:43:29 -08002422 sp_node_init(n_new, end, n->end, mpol_new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 n->end = start;
Hillf Danton5ca39572013-03-08 12:43:28 -08002424 sp_insert(sp, n_new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002425 n_new = NULL;
2426 mpol_new = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427 break;
2428 } else
2429 n->end = start;
2430 }
2431 if (!next)
2432 break;
2433 n = rb_entry(next, struct sp_node, nd);
2434 }
2435 if (new)
2436 sp_insert(sp, new);
Mel Gorman42288fe2012-12-21 23:10:25 +00002437 spin_unlock(&sp->lock);
2438 ret = 0;
2439
2440err_out:
2441 if (mpol_new)
2442 mpol_put(mpol_new);
2443 if (n_new)
2444 kmem_cache_free(sn_cache, n_new);
2445
Mel Gormanb22d1272012-10-08 16:29:17 -07002446 return ret;
Mel Gorman42288fe2012-12-21 23:10:25 +00002447
2448alloc_new:
2449 spin_unlock(&sp->lock);
2450 ret = -ENOMEM;
2451 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2452 if (!n_new)
2453 goto err_out;
2454 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2455 if (!mpol_new)
2456 goto err_out;
2457 goto restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458}
2459
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002460/**
2461 * mpol_shared_policy_init - initialize shared policy for inode
2462 * @sp: pointer to inode shared policy
2463 * @mpol: struct mempolicy to install
2464 *
2465 * Install non-NULL @mpol in inode's shared policy rb-tree.
2466 * On entry, the current task has a reference on a non-NULL @mpol.
2467 * This must be released on exit.
KAMEZAWA Hiroyuki4bfc4492009-08-06 15:07:33 -07002468 * This is called during get_inode() calls, so we can use GFP_KERNEL.
Lee Schermerhorn71fe8042008-04-28 02:13:26 -07002469 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
        int ret;

        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
        spin_lock_init(&sp->lock);

        if (mpol) {
                struct vm_area_struct pvma;
                struct mempolicy *new;
                NODEMASK_SCRATCH(scratch);

                if (!scratch)
                        goto put_mpol;
                /* contextualize the tmpfs mount point mempolicy */
                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
                if (IS_ERR(new))
                        goto free_scratch; /* no valid nodemask intersection */

                task_lock(current);
                ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
                task_unlock(current);
                if (ret)
                        goto put_new;

                /* Create pseudo-vma that contains just the policy */
                memset(&pvma, 0, sizeof(struct vm_area_struct));
                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
                mpol_put(new);                  /* drop initial ref */
free_scratch:
                NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
                mpol_put(mpol); /* drop our incoming ref on sb mpol */
        }
}

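/*
 * Usage sketch (illustrative, not part of this file): a tmpfs-style
 * filesystem hands over a reference on its superblock mempolicy when
 * initializing a new inode, roughly:
 *
 *      mpol_get(sbinfo->mpol);            (take the ref we pass in)
 *      mpol_shared_policy_init(&info->policy, sbinfo->mpol);
 *
 * The incoming reference is always dropped in here, even on failure,
 * so the caller must not mpol_put() it again. sbinfo/info are
 * hypothetical names for the caller's superblock and inode state.
 */
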
int mpol_set_shared_policy(struct shared_policy *info,
                        struct vm_area_struct *vma, struct mempolicy *npol)
{
        int err;
        struct sp_node *new = NULL;
        unsigned long sz = vma_pages(vma);

        pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
                 vma->vm_pgoff,
                 sz, npol ? npol->mode : -1,
                 npol ? npol->flags : -1,
                 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

        if (npol) {
                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
                if (!new)
                        return -ENOMEM;
        }
        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff + sz, new);
        if (err && new)
                sp_free(new);
        return err;
}

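/*
 * Usage sketch (hedged; modelled on the shmem callers): a filesystem's
 * ->set_policy() vma operation typically forwards straight to this
 * helper, e.g.
 *
 *      static int shmem_set_policy(struct vm_area_struct *vma,
 *                                  struct mempolicy *mpol)
 *      {
 *              struct inode *inode = file_inode(vma->vm_file);
 *              return mpol_set_shared_policy(&SHMEM_I(inode)->policy,
 *                                            vma, mpol);
 *      }
 *
 * SHMEM_I() and file_inode() are the in-tree helpers this sketch assumes.
 */
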
/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
        struct sp_node *n;
        struct rb_node *next;

        if (!p->root.rb_node)
                return;
        spin_lock(&p->lock);
        next = rb_first(&p->root);
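        /* Cache each successor before sp_delete() frees the node. */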
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
                sp_delete(p, n);
        }
        spin_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
        bool numabalancing_default = false;

        if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
                numabalancing_default = true;

        /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
        if (numabalancing_override)
                set_numabalancing_state(numabalancing_override == 1);

        if (nr_node_ids > 1 && !numabalancing_override) {
                pr_info("%s automatic NUMA balancing. "
                        "Configure with numa_balancing= or the "
                        "kernel.numa_balancing sysctl\n",
                        numabalancing_default ? "Enabling" : "Disabling");
                set_numabalancing_state(numabalancing_default);
        }
}

static int __init setup_numabalancing(char *str)
{
        int ret = 0;

        if (!str)
                goto out;

        if (!strcmp(str, "enable")) {
                numabalancing_override = 1;
                ret = 1;
        } else if (!strcmp(str, "disable")) {
                numabalancing_override = -1;
                ret = 1;
        }
out:
        if (!ret)
                pr_warn("Unable to parse numa_balancing=\n");

        return ret;
}
__setup("numa_balancing=", setup_numabalancing);
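
/*
 * Example: automatic NUMA balancing can be forced on or off from the
 * boot command line, e.g.
 *
 *      numa_balancing=disable
 *
 * or flipped at runtime via the sysctl the message above refers to
 * (a shell sketch):
 *
 *      sysctl kernel.numa_balancing=1
 */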
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
        nodemask_t interleave_nodes;
        unsigned long largest = 0;
        int nid, prefer = 0;

        policy_cache = kmem_cache_create("numa_policy",
                                         sizeof(struct mempolicy),
                                         0, SLAB_PANIC, NULL);

        sn_cache = kmem_cache_create("shared_policy_node",
                                     sizeof(struct sp_node),
                                     0, SLAB_PANIC, NULL);

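        /*
         * Per-node "prefer the faulting node" policies used by automatic
         * NUMA balancing (a descriptive note; see the MPOL_F_* definitions):
         * MPOL_F_MOF requests migrate-on-fault and MPOL_F_MORON marks the
         * policy as installed by the balancer rather than by the task.
         */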
        for_each_node(nid) {
                preferred_node_policy[nid] = (struct mempolicy) {
                        .refcnt = ATOMIC_INIT(1),
                        .mode = MPOL_PREFERRED,
                        .flags = MPOL_F_MOF | MPOL_F_MORON,
                        .v = { .preferred_node = nid, },
                };
        }

        /*
         * Set interleaving policy for system init. Interleaving is only
         * enabled across suitably sized nodes (default is >= 16MB); if
         * all nodes are smaller, fall back to the largest node.
         */
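        /*
         * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
         * the size test below reduces to total_pages >= 4096, since
         * 4096 pages << 12 == 16 MiB == 16 << 20 bytes.
         */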
        nodes_clear(interleave_nodes);
        for_each_node_state(nid, N_MEMORY) {
                unsigned long total_pages = node_present_pages(nid);

                /* Preserve the largest node */
                if (largest < total_pages) {
                        largest = total_pages;
                        prefer = nid;
                }

                /* Interleave this node? */
                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
                        node_set(nid, interleave_nodes);
        }

        /* All too small, use the largest */
        if (unlikely(nodes_empty(interleave_nodes)))
                node_set(prefer, interleave_nodes);

        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
                pr_err("%s: interleaving failed\n", __func__);

        check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
        do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with the
 * MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
        [MPOL_DEFAULT]    = "default",
        [MPOL_PREFERRED]  = "prefer",
        [MPOL_BIND]       = "bind",
        [MPOL_INTERLEAVE] = "interleave",
        [MPOL_LOCAL]      = "local",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str:  string containing mempolicy to parse
 * @mpol:  pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
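 * Examples (illustrative; the node numbers assume a machine that
 * actually has memory on nodes 0-3):
 *	default
 *	prefer:2
 *	bind=static:1,3
 *	interleave=relative:0-3
 *	local
 *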
 * Returns 0 on success, 1 on failure.
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        struct mempolicy *new = NULL;
        unsigned short mode;
        unsigned short mode_flags;
        nodemask_t nodes;
        char *nodelist = strchr(str, ':');
        char *flags = strchr(str, '=');
        int err = 1;

        if (nodelist) {
                /* NUL-terminate mode or flags string */
                *nodelist++ = '\0';
                if (nodelist_parse(nodelist, nodes))
                        goto out;
                if (!nodes_subset(nodes, node_states[N_MEMORY]))
                        goto out;
        } else
                nodes_clear(nodes);

        if (flags)
                *flags++ = '\0';        /* terminate mode string */

        for (mode = 0; mode < MPOL_MAX; mode++) {
                if (!strcmp(str, policy_modes[mode]))
                        break;
        }
        if (mode >= MPOL_MAX)
                goto out;

        switch (mode) {
        case MPOL_PREFERRED:
                /*
                 * Insist on a nodelist of one node only
                 */
                if (nodelist) {
                        char *rest = nodelist;
                        while (isdigit(*rest))
                                rest++;
                        if (*rest)
                                goto out;
                }
                break;
        case MPOL_INTERLEAVE:
                /*
                 * Default to online nodes with memory if no nodelist
                 */
                if (!nodelist)
                        nodes = node_states[N_MEMORY];
                break;
        case MPOL_LOCAL:
                /*
                 * Don't allow a nodelist; mpol_new() checks flags
                 */
                if (nodelist)
                        goto out;
                mode = MPOL_PREFERRED;
                break;
        case MPOL_DEFAULT:
                /*
                 * Insist on an empty nodelist
                 */
                if (!nodelist)
                        err = 0;
                goto out;
        case MPOL_BIND:
                /*
                 * Insist on a nodelist
                 */
                if (!nodelist)
                        goto out;
        }

        mode_flags = 0;
        if (flags) {
                /*
                 * Currently, we only support two mutually exclusive
                 * mode flags.
                 */
                if (!strcmp(flags, "static"))
                        mode_flags |= MPOL_F_STATIC_NODES;
                else if (!strcmp(flags, "relative"))
                        mode_flags |= MPOL_F_RELATIVE_NODES;
                else
                        goto out;
        }

        new = mpol_new(mode, mode_flags, &nodes);
        if (IS_ERR(new))
                goto out;

        /*
         * Save nodes for mpol_to_str() to show the tmpfs mount options
         * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
         */
        if (mode != MPOL_PREFERRED)
                new->v.nodes = nodes;
        else if (nodelist)
                new->v.preferred_node = first_node(nodes);
        else
                new->flags |= MPOL_F_LOCAL;

        /*
         * Save nodes for contextualization: this will be used to "clone"
         * the mempolicy in a specific context [cpuset] at a later time.
         */
        new->w.user_nodemask = nodes;

        err = 0;

out:
        /* Restore string for error message */
        if (nodelist)
                *--nodelist = ':';
        if (flags)
                *--flags = '=';
        if (!err)
                *mpol = new;
        return err;
}
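
/*
 * Usage sketch (hedged; the tmpfs mount-option code is the real caller):
 * @str must be writable, since the parser temporarily NUL-terminates it
 * in place, and the result carries a reference the caller must drop.
 *
 *      struct mempolicy *mpol = NULL;
 *      char value[] = "interleave:0-3";
 *
 *      if (mpol_parse_str(value, &mpol))
 *              return -EINVAL;
 *      ... install mpol somewhere, or ...
 *      mpol_put(mpol);
 */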
#endif /* CONFIG_TMPFS */

/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer:  to contain formatted mempolicy string
 * @maxlen:  length of @buffer
 * @pol:  pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
        char *p = buffer;
        nodemask_t nodes = NODE_MASK_NONE;
        unsigned short mode = MPOL_DEFAULT;
        unsigned short flags = 0;

        if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
                mode = pol->mode;
                flags = pol->flags;
        }

        switch (mode) {
        case MPOL_DEFAULT:
                break;
        case MPOL_PREFERRED:
                if (flags & MPOL_F_LOCAL)
                        mode = MPOL_LOCAL;
                else
                        node_set(pol->v.preferred_node, nodes);
                break;
        case MPOL_BIND:
        case MPOL_INTERLEAVE:
                nodes = pol->v.nodes;
                break;
        default:
                WARN_ON_ONCE(1);
                snprintf(p, maxlen, "unknown");
                return;
        }

        p += snprintf(p, maxlen, "%s", policy_modes[mode]);

        if (flags & MPOL_MODE_FLAGS) {
                p += snprintf(p, buffer + maxlen - p, "=");

                /*
                 * Currently, the only defined flags are mutually exclusive
                 */
                if (flags & MPOL_F_STATIC_NODES)
                        p += snprintf(p, buffer + maxlen - p, "static");
                else if (flags & MPOL_F_RELATIVE_NODES)
                        p += snprintf(p, buffer + maxlen - p, "relative");
        }

        if (!nodes_empty(nodes)) {
                p += snprintf(p, buffer + maxlen - p, ":");
                p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
        }
}
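
/*
 * Usage sketch (illustrative; shmem's show_options code is the kind of
 * caller this is written for):
 *
 *      char buffer[64];
 *
 *      mpol_to_str(buffer, sizeof(buffer), mpol);
 *      seq_printf(seq, ",mpol=%s", buffer);
 *
 * 64 bytes comfortably exceeds the recommended minimum of 32 above.
 */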