/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		putback_lru_page(page);
		count++;
	}
	return count;
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static void remove_migration_pte(struct vm_area_struct *vma,
		struct page *old, struct page *new)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	unsigned long addr = page_address_in_vma(new, vma);

	if (addr == -EFAULT)
		return;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return;

	ptep = pte_offset_map(pmd, addr);

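	/* Quick check without the page table lock; rechecked under the lock below. */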
	if (!is_swap_pte(*ptep)) {
		pte_unmap(ptep);
		return;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
		goto out;

	/*
	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
	 * Failure is not an option here: we're now expected to remove every
	 * migration pte, and will cause crashes otherwise. Normally this
	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
	 * page_cgroup count for safety, that's now attached to the new page,
	 * so this charge should just be another incrementation of the count,
	 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
	 * there's been a force_empty, those reference counts may no longer
	 * be reliable, and this charge can actually fail: oh well, we don't
	 * make the situation any worse by proceeding as if it had succeeded.
	 */
	mem_cgroup_charge(new, mm, GFP_ATOMIC);

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, pte);

out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Note that remove_file_migration_ptes will only work on regular mappings;
 * nonlinear mappings do not use migration entries.
 */
static void remove_file_migration_ptes(struct page *old, struct page *new)
{
	struct vm_area_struct *vma;
	struct address_space *mapping = page_mapping(new);
	struct prio_tree_iter iter;
	pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (!mapping)
		return;

	spin_lock(&mapping->i_mmap_lock);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
		remove_migration_pte(vma, old, new);

	spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Must hold mmap_sem lock on at least one of the vmas containing
 * the page so that the anon_vma cannot vanish.
 */
static void remove_anon_migration_ptes(struct page *old, struct page *new)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	mapping = (unsigned long)new->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_migration_pte(vma, old, new);

	spin_unlock(&anon_vma->lock);
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	if (PageAnon(new))
		remove_anon_migration_ptes(old, new);
	else
		remove_file_migration_ptes(old, new);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even failed, page fault
	 * will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 *   1 for anonymous pages without a mapping
 *   2 for pages with a mapping
 *   3 for pages with a mapping and PagePrivate set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + !!PagePrivate(page);
	if (page_count(page) != expected_count ||
			(struct page *)radix_tree_deref_slot(pslot) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
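	/*
	 * For swap cache pages, carry the SwapCache flag and the swap
	 * entry stored in page_private over to the new page.
	 */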
#ifdef CONFIG_SWAP
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
#endif

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);

	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * Copy the page to its new location
 */
static void migrate_page_copy(struct page *newpage, struct page *page)
{
	int anon;

	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else
		unevictable_migrate_page(newpage, page);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);

#ifdef CONFIG_SWAP
	ClearPageSwapCache(page);
#endif
	ClearPagePrivate(page);
	set_page_private(page, 0);
	/* page->mapping contains a flag for PageAnon() */
	anon = PageAnon(page);
	page->mapping = NULL;

	if (!anon) /* This page was removed from radix-tree. */
		mem_cgroup_uncharge_cache_page(page);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

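	/*
	 * Take a reference on each buffer and lock it so nobody else
	 * can touch the buffers while the page is moved.
	 */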
	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

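	/* Repoint each buffer head at the new page. */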
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

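	/* Drop the buffer locks and references taken above. */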
	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.nonblocking = 1,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (PagePrivate(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems
		 * should provide a migration function. Anonymous
		 * pages are part of swap space which also has its
		 * own migration function. This is the most common
		 * path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (!rc) {
		remove_migration_ptes(page, newpage);
	} else
		newpage->mapping = NULL;

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int rcu_locked = 0;
	int charge = 0;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}

	charge = mem_cgroup_prepare_migration(page, newpage);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto move_newpage;
	}
	/* prepare cgroup just returns 0 or -ENOMEM */
	BUG_ON(charge);

	rc = -EAGAIN;
	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;
		lock_page(page);
	}

	if (PageWriteback(page)) {
		if (!force)
			goto unlock;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that the anon_vma is freed while we migrate a page.
	 * This rcu_read_lock() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() during
	 * migration, so we only need to take care of anon pages here.
	 */
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		if (!PageAnon(page) && PagePrivate(page)) {
			/*
			 * Go direct to try_to_free_buffers() here because
			 * a) that's what try_to_release_page() would do anyway
			 * b) we may be under rcu_read_lock() here, so we can't
			 *    use GFP_KERNEL which is what try_to_release_page()
			 *    needs to be effective.
			 */
			try_to_free_buffers(page);
		}
		goto rcu_unlock;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, 1);

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page);

	if (rc)
		remove_migration_ptes(page, page);
rcu_unlock:
	if (rcu_locked)
		rcu_read_unlock();

unlock:
	unlock_page(page);

	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		putback_lru_page(page);
	}

move_newpage:
	if (!charge)
		mem_cgroup_end_migration(newpage);

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore. All pages will be
 * returned to the LRU or freed.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

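	/*
	 * Try up to 10 passes over the list; from the fourth pass on
	 * unmap_and_move() is allowed to block (force) on the page lock
	 * and on writeback.
	 */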
	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	putback_lru_pages(from);

	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

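/*
 * Allocate the target page for "p" on the node requested by the
 * page_to_node array entry (passed via "private") that refers to it.
 */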
static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

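	/* Drain the per-cpu LRU caches so isolate_lru_page() can find the pages. */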
	migrate_prep();
	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		/*
		 * A valid page pointer that will not match any of the
		 * pages that will be moved.
		 */
		pp->page = ZERO_PAGE(0);

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page))		/* Check for zero page */
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err)
			list_add_tail(&page->lru, &pagelist);
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist))
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm);

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm = NULL;
	nodemask_t task_nodes;
	int err = 0;
	int i;

	task_nodes = cpuset_mems_allowed(task);

	/* Limit nr_pages so that the multiplication may not overflow */
	if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
		err = -E2BIG;
		goto out;
	}

	pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
	if (!pm) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Get parameters from user space and initialize the pm
	 * array. Return various errors if the user did something wrong.
	 */
	for (i = 0; i < nr_pages; i++) {
		const void __user *p;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_pm;

		pm[i].addr = (unsigned long)p;
		if (nodes) {
			int node;

			if (get_user(node, nodes + i))
				goto out_pm;

			err = -ENODEV;
			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[i].node = node;
		} else
			pm[i].node = 0;	/* anything to not match MAX_NUMNODES */
	}
	/* End marker */
	pm[nr_pages].node = MAX_NUMNODES;

	err = do_move_page_to_node_array(mm, pm, flags & MPOL_MF_MOVE_ALL);
	if (err >= 0)
		/* Return status information */
		for (i = 0; i < nr_pages; i++)
			if (put_user(pm[i].status, status + i))
				err = -EFAULT;

out_pm:
	vfree(pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
	unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
	int err;

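	/*
	 * Work through the user arrays in chunks of DO_PAGES_STAT_CHUNK_NR
	 * entries so the on-stack copies stay small.
	 */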
	for (i = 0; i < nr_pages; i += chunk_nr) {
		if (chunk_nr + i > nr_pages)
			chunk_nr = nr_pages - i;

		err = copy_from_user(chunk_pages, &pages[i],
				     chunk_nr * sizeof(*chunk_pages));
		if (err) {
			err = -EFAULT;
			goto out;
		}

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		err = copy_to_user(&status[i], chunk_status,
				   chunk_nr * sizeof(*chunk_status));
		if (err) {
			err = -EFAULT;
			goto out;
		}
	}
	err = 0;

out:
	return err;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
			const void __user * __user *pages,
			const int __user *nodes,
			int __user *status, int flags)
{
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	read_lock(&tasklist_lock);
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}
	mm = get_task_mm(task);
	read_unlock(&tasklist_lock);

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	if ((current->euid != task->suid) && (current->euid != task->uid) &&
	    (current->uid != task->suid) && (current->uid != task->uid) &&
	    !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out;
	}

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif