/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
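
/*
 * Illustrative usage sketch (not a new kernel API, just the calling
 * convention the two prep helpers above imply): callers drain the LRU
 * caches, isolate their candidate pages, then hand the list to
 * migrate_pages(), e.g.:
 *
 *	migrate_prep();
 *	... for each candidate page ...
 *		if (!isolate_lru_page(page))
 *			list_add_tail(&page->lru, &pagelist);
 *	...
 *	err = migrate_pages(&pagelist, get_new_page, private, 0, true);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 *
 * do_move_page_to_node_array() below follows exactly this pattern.
 */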

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}
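
/*
 * For orientation, a sketch of the other half of the mechanism (paraphrased,
 * not a verbatim excerpt): the migration entries removed above are installed
 * by try_to_unmap(..., TTU_MIGRATION), which replaces each mapping pte
 * roughly like this:
 *
 *	swp_entry_t entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *
 * The entry encodes the old page and its writability until the pte is
 * restored against the new page here.
 */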

/*
 * Get rid of all migration entries and replace them with
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once the radix-tree replacement step of page migration has
	 * started, page_count *must* be zero. And, we don't want to call
	 * wait_on_page_locked() against a page without get_page().
	 * So, we use get_page_unless_zero() here. Even if it fails, the
	 * page fault will simply occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
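
/*
 * Illustrative sketch of the call site (paraphrasing do_swap_page(), not a
 * verbatim excerpt): the fault handler separates migration entries from real
 * swap entries before starting any swap I/O, then simply retries the fault:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry)) {
 *			migration_entry_wait(mm, pmd, address);
 *			goto out;
 *		}
 *		...
 *	}
 */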

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
	 * are mapped to swap space.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}
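
/*
 * Worked example of the reference rule above (illustrative): for a
 * page-cache page carrying buffer heads, the three expected references are
 * the radix-tree (page cache) reference, the caller's isolation reference
 * and the PagePrivate reference held by the buffers, hence
 * expected_count = 2 + page_has_private(page). Any transient extra
 * reference (e.g. a concurrent get_user_pages()) makes page_freeze_refs()
 * fail and the attempt is retried with -EAGAIN.
 */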

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
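
/*
 * Illustrative filesystem-side sketch (hypothetical "exfs" names): a
 * filesystem whose pages carry no private data can export migrate_page()
 * directly through its address_space_operations:
 *
 *	static const struct address_space_operations exfs_aops = {
 *		.readpage	= exfs_readpage,
 *		.writepage	= exfs_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * move_to_new_page() below invokes ->migratepage() through exactly this hook.
 */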

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
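
/*
 * Illustrative note (sketch, not a verbatim excerpt from any particular
 * filesystem): block-backed filesystems that keep buffer heads on their
 * page-cache pages would wire up
 *
 *	.migratepage	= buffer_migrate_page,
 *
 * instead of migrate_page(), so the buffer ring is carried over to the
 * new page as done above.
 */
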
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
					int remap_swapcache, bool sync)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page. */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else {
		/*
		 * Do not writeback pages if !sync and migratepage is
		 * not pointing to migrate_page() which is nonblocking
		 * (swapcache/tmpfs uses migratepage = migrate_page).
		 */
		if (PageDirty(page) && !sync &&
		    mapping->a_ops->migratepage != migrate_page)
			rc = -EBUSY;
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping,
							newpage, page);
		else
			rc = fallback_migrate_page(mapping, newpage, page);
	}

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto move_newpage;

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force || !sync)
			goto move_newpage;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto move_newpage;

		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page. The other cases have skipped
	 * PageKsm along with PageReserved - but it is only now when we have
	 * the page lock that we can be certain it will not go KSM beneath us
	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
	 * its pagecount raised, but only here do we take the page lock which
	 * serializes that).
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		/*
		 * For !sync, there is no point retrying as the retry loop
		 * is expected to be too short for PageWriteback to be cleared
		 */
		if (!sync) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() during
	 * migration, so we only need to take care of anonymous pages here.
	 */
	if (PageAnon(page)) {
		/*
		 * Only page_lock_anon_vma() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_lock_anon_vma(page);
		if (anon_vma) {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later
			 */
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, sync);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);

move_newpage:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}
/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || !sync)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		anon_vma = page_lock_anon_vma(hpage);
		if (anon_vma) {
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, sync);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}
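
/*
 * Worked example for the reference count described above (illustrative,
 * assuming 4KB base pages): a 2MB hugepage spans 2MB / 4KB = 512 subpages,
 * and each subpage under direct I/O pins the head page once, so a fully
 * in-flight hugepage holds at least 512 extra references. try_to_unmap()
 * cannot drop those, which is why such a migration attempt fails cleanly
 * rather than corrupting the I/O.
 */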

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a callback function
 * that, given a page to be migrated and the private data, determines the
 * target of the move and allocates the new page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because the list has become empty
 * or no retryable pages exist anymore.
 * The caller should call putback_lru_pages() to return the pages to the
 * LRU or free list, but only if ret != 0.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining,
						sync);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (rc)
		return rc;

	return nr_failed + retry;
}
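
/*
 * Illustrative caller sketch (hypothetical helper, modelled on
 * new_page_node() below): supply a new_page_t allocator and hand over an
 * isolated list; the private argument here is assumed to carry the target
 * node id:
 *
 *	static struct page *alloc_on_node(struct page *page,
 *					  unsigned long private, int **result)
 *	{
 *		return alloc_pages_exact_node((int)private,
 *					      GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *	...
 *	err = migrate_pages(&pagelist, alloc_on_node, nid, 0, true);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 */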

int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining,
					sync);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0, true);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of the page_to_node array in a page,
	 * but keep the last entry as an end marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
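
	/*
	 * Worked example (illustrative, assuming a 64-bit build with 4KB
	 * pages): sizeof(struct page_to_node) is 24 bytes, so one page
	 * holds 4096 / 24 = 170 entries, i.e. up to 169 pages are migrated
	 * per chunk plus the MAX_NUMNODES end marker in the last slot.
	 */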

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	mm = get_task_mm(task);
	rcu_read_unlock();

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}
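
/*
 * Illustrative userspace sketch (via the libnuma wrapper declared in
 * <numaif.h>, not kernel code): move one page of the calling process to
 * node 1 and read back where it ended up:
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 *
 * Passing nodes == NULL takes the do_pages_stat() path above and only
 * reports each page's current node in status[].
 */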

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. Migration functions may perform
 * the migration for vmas that do not have an underlying page struct.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif