/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

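	/* Walk the page tables down to the pte for addr */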
	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		goto out;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them with
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero. And we don't want to call
	 * wait_on_page_locked() against a page without holding a reference
	 * from get_page(). So we use get_page_unless_zero() here. Even if
	 * it fails, the page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

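	/*
	 * Take a reference on each buffer head and lock it so the buffers
	 * cannot be used while the page is switched over.
	 */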
	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

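	/*
	 * Point each buffer head at the new page, keeping its offset
	 * within the page unchanged.
	 */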
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

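	/* Drop the buffer locks and the references taken above */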
	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Write back a page to clean its dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

Christoph Lameter1d8b85c2006-06-23 02:03:28 -0700486/*
Christoph Lametere24f0b82006-06-23 02:03:51 -0700487 * Move a page to a newly allocated page
488 * The page is locked and all ptes have been successfully removed.
489 *
490 * The new page will have replaced the old page if this function
491 * is successful.
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700492 *
493 * Return value:
494 * < 0 - error code
495 * == 0 - success
Christoph Lametere24f0b82006-06-23 02:03:51 -0700496 */
Mel Gorman3fe20112010-05-24 14:32:20 -0700497static int move_to_new_page(struct page *newpage, struct page *page,
498 int remap_swapcache)
Christoph Lametere24f0b82006-06-23 02:03:51 -0700499{
500 struct address_space *mapping;
501 int rc;
502
503 /*
504 * Block others from accessing the page when we get around to
505 * establishing additional references. We are the only one
506 * holding a reference to the new page at this point.
507 */
Nick Piggin529ae9a2008-08-02 12:01:03 +0200508 if (!trylock_page(newpage))
Christoph Lametere24f0b82006-06-23 02:03:51 -0700509 BUG();
510
511 /* Prepare mapping for the new page.*/
512 newpage->index = page->index;
513 newpage->mapping = page->mapping;
Rik van Rielb2e18532008-10-18 20:26:30 -0700514 if (PageSwapBacked(page))
515 SetPageSwapBacked(newpage);
Christoph Lametere24f0b82006-06-23 02:03:51 -0700516
517 mapping = page_mapping(page);
518 if (!mapping)
519 rc = migrate_page(mapping, newpage, page);
520 else if (mapping->a_ops->migratepage)
521 /*
522 * Most pages have a mapping and most filesystems
523 * should provide a migration function. Anonymous
524 * pages are part of swap space which also has its
525 * own migration function. This is the most common
526 * path for page migration.
527 */
528 rc = mapping->a_ops->migratepage(mapping,
529 newpage, page);
530 else
531 rc = fallback_migrate_page(mapping, newpage, page);
532
Mel Gorman3fe20112010-05-24 14:32:20 -0700533 if (rc) {
Christoph Lametere24f0b82006-06-23 02:03:51 -0700534 newpage->mapping = NULL;
Mel Gorman3fe20112010-05-24 14:32:20 -0700535 } else {
536 if (remap_swapcache)
537 remove_migration_ptes(page, newpage);
538 }
Christoph Lametere24f0b82006-06-23 02:03:51 -0700539
540 unlock_page(newpage);
541
542 return rc;
543}
544
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page.  The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By the time try_to_unmap() returns, page->mapcount has dropped to
	 * 0, so we would not notice the anon_vma being freed while we
	 * migrate a page. This rcu_read_lock() delays freeing of the
	 * anon_vma pointer until the end of migration. File cache pages are
	 * no problem because of page_lock(): file caches use write_page()
	 * or lock_page() during migration, so only anon pages need this
	 * care here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;

		/* Determine how to safely use anon_vma */
		if (!page_mapped(page)) {
			if (!PageSwapCache(page))
				goto rcu_unlock;

			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes.
			 */
			remap_swapcache = 0;
		} else {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			anon_vma = page_anon_vma(page);
			get_anon_vma(anon_vma);
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read in, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
			goto rcu_unlock;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);
rcu_unlock:

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		drop_anon_vma(anon_vma);

	if (rcu_locked)
		rcu_read_unlock();
uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that, given a page to be migrated and the private data, determines
 * the target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages are movable
 * anymore because the list has become empty or no retryable pages
 * exist anymore. All pages will be returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

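	/*
	 * Find the entry for this page; the pm array is terminated by
	 * an entry with node == MAX_NUMNODES.
	 */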
	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0);

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

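	/*
	 * Process the user arrays in chunks of at most
	 * DO_PAGES_STAT_CHUNK_NR entries, buffered on the stack.
	 */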
	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif