/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

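	/*
	 * Walk the page tables down to the pte for addr; if any level is
	 * not present, there is no migration entry here to restore.
	 */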
	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map(pmd, addr);

	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		goto out;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero, and we don't want to call
	 * wait_on_page_locked() against a page without a get_page()
	 * reference. So we use get_page_unless_zero() here; even if it
	 * fails, the page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
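
/*
 * Caller-side sketch (illustrative, not part of this file): in
 * do_swap_page(), a swap pte that is really a migration entry is
 * handled roughly as:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *		...
 *		goto out;
 *	}
 */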

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become
		 * dirty, whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
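
/*
 * Usage sketch (an assumption for illustration; example_aops is not
 * defined anywhere): a filesystem whose pages carry no private state can
 * use migrate_page() directly as its ->migratepage method, e.g.:
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */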

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

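	/*
	 * Take a reference on every buffer on the page and lock it,
	 * so that no one else touches the buffers while we move them.
	 */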
	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

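	/* Point each buffer at its offset within the new page. */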
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
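
/*
 * Cross-reference (an observation about the wider tree of this era, not
 * something this file defines): buffer-backed filesystems such as ext3
 * wire this up as their ->migratepage method, i.e. they set
 * .migratepage = buffer_migrate_page in their address_space_operations.
 */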

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

Christoph Lameter1d8b85c2006-06-23 02:03:28 -0700481/*
Christoph Lametere24f0b82006-06-23 02:03:51 -0700482 * Move a page to a newly allocated page
483 * The page is locked and all ptes have been successfully removed.
484 *
485 * The new page will have replaced the old page if this function
486 * is successful.
Lee Schermerhorn894bc312008-10-18 20:26:39 -0700487 *
488 * Return value:
489 * < 0 - error code
490 * == 0 - success
Christoph Lametere24f0b82006-06-23 02:03:51 -0700491 */
492static int move_to_new_page(struct page *newpage, struct page *page)
493{
494 struct address_space *mapping;
495 int rc;
496
497 /*
498 * Block others from accessing the page when we get around to
499 * establishing additional references. We are the only one
500 * holding a reference to the new page at this point.
501 */
Nick Piggin529ae9a2008-08-02 12:01:03 +0200502 if (!trylock_page(newpage))
Christoph Lametere24f0b82006-06-23 02:03:51 -0700503 BUG();
504
505 /* Prepare mapping for the new page.*/
506 newpage->index = page->index;
507 newpage->mapping = page->mapping;
Rik van Rielb2e18532008-10-18 20:26:30 -0700508 if (PageSwapBacked(page))
509 SetPageSwapBacked(newpage);
Christoph Lametere24f0b82006-06-23 02:03:51 -0700510
511 mapping = page_mapping(page);
512 if (!mapping)
513 rc = migrate_page(mapping, newpage, page);
514 else if (mapping->a_ops->migratepage)
515 /*
516 * Most pages have a mapping and most filesystems
517 * should provide a migration function. Anonymous
518 * pages are part of swap space which also has its
519 * own migration function. This is the most common
520 * path for page migration.
521 */
522 rc = mapping->a_ops->migratepage(mapping,
523 newpage, page);
524 else
525 rc = fallback_migrate_page(mapping, newpage, page);
526
Hugh Dickinse9995ef2009-12-14 17:59:31 -0800527 if (!rc)
Christoph Lametere24f0b82006-06-23 02:03:51 -0700528 remove_migration_ptes(page, newpage);
Hugh Dickinse9995ef2009-12-14 17:59:31 -0800529 else
Christoph Lametere24f0b82006-06-23 02:03:51 -0700530 newpage->mapping = NULL;
531
532 unlock_page(newpage);
533
534 return rc;
535}
536
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, int offlining)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page. The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By the time try_to_unmap() returns, page->mapcount may have gone
	 * down to 0, and then we cannot notice that the anon_vma is freed
	 * while we migrate a page. This rcu_read_lock() delays freeing of
	 * the anon_vma pointer until the end of migration. File cache pages
	 * are no problem because of the page lock: file caches may use
	 * writepage() or lock_page() during migration, so only anonymous
	 * pages need this care.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && page_has_private(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 * use GFP_KERNEL which is what try_to_release_page()
			 * needs to be effective.
			 */
			try_to_free_buffers(page);
			goto rcu_unlock;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);
rcu_unlock:
	if (rcu_locked)
		rcu_read_unlock();
uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

move_newpage:

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that, given a page to be migrated and the private data, determines
 * the target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, int offlining)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}
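
/*
 * Caller sketch (illustrative only; new_node_page, pagelist and
 * target_node below are not defined in this file): users typically drain
 * the LRU, isolate the pages they want onto a private list and hand that
 * list here together with an allocation callback, roughly:
 *
 *	static struct page *new_node_page(struct page *page,
 *				unsigned long node, int **result)
 *	{
 *		return alloc_pages_exact_node((int)node,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	migrate_prep();
 *	// isolate_lru_page() each candidate onto &pagelist, then:
 *	err = migrate_pages(&pagelist, new_node_page, target_node, 0);
 */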

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0);

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of the page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store them in an array
 * of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}
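
/*
 * Userspace sketch (illustrative, not part of this file): this syscall
 * backs the libnuma move_pages(2) wrapper. A minimal call to move one
 * page of the current process to node 1 looks roughly like:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * On return each status[i] holds the page's node or a negative errno.
 */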

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif