/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

#include "internal.h"

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *	-EBUSY: page not on LRU list
 *	0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

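/*
 * Call-order sketch (explanatory note, not code from this file): per the
 * comment above, the page list is compiled first, migrate_prep() runs
 * next, and only then does the series of migrate_pages() calls start:
 *
 *	LIST_HEAD(pagelist);
 *	LIST_HEAD(newlist);
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *
 *	(isolate_lru_page(page, &pagelist) for each page to be moved)
 *
 *	if (migrate_prep() == 0) {
 *		(fill newlist with freshly allocated target pages)
 *		migrate_pages(&pagelist, &newlist, &moved, &failed);
 *		putback_lru_pages(&moved);
 *	}
 *	putback_lru_pages(&pagelist);
 *
 * migrate_pages_to() at the bottom of this file drives essentially this
 * sequence, minus the migrate_prep() call, in MIGRATE_CHUNK_SIZE batches.
 */
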
static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else {
		lru_cache_add(page);
	}
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non-migratable page.
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Swap out a single page.
 * The page is locked upon entry and unlocked on exit.
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

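/*
 * Reference accounting note (explanatory, not from the original source):
 * nr_refs above is the page count the caller expects for an otherwise
 * unused page. A page cache page isolated from the LRU is pinned by one
 * reference from the radix tree plus the one taken by isolate_lru_page(),
 * so migrate_page() below passes 2; a page that also carries buffers is
 * pinned once more, so buffer_migrate_page() passes 3. A higher count
 * means someone else is still using the page and the caller must retry.
 */
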
/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

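/*
 * Wiring sketch (illustrative; the struct and function names are
 * hypothetical): a filesystem whose pages carry no private state can opt
 * in to direct migration by pointing the migratepage method of its
 * address_space_operations at migrate_page():
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * migrate_pages() below prefers such a method over its generic
 * writeout-based fallback whenever the mapping provides one.
 */
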
/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0) {
			wait_on_page_writeback(page);
		} else {
			if (PageWriteback(page))
				goto unlock_page;
		}

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers. As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}

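/*
 * Mode sketch (explanatory note, not from the original source): the same
 * loop serves two kinds of callers. With a target list it moves pages:
 *
 *	migrate_pages(&pagelist, &newlist, &moved, &failed);
 *
 * With to == NULL it degrades to plain swapout for every page:
 *
 *	migrate_pages(&pagelist, NULL, &moved, &failed);
 *
 * Afterwards permanently failed pages sit on "failed", successfully
 * processed ones on "moved", and pages still worth retrying remain on
 * "from".
 */
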
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct buffer_head *bh, *head;
	int rc;

	if (!mapping)
		return -EAGAIN;

	if (!page_has_buffers(page))
		return migrate_page(newpage, page);

	head = page_buffers(page);

	rc = migrate_page_remove_references(newpage, page, 3);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);

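/*
 * Wiring sketch (illustrative; the struct shown is hypothetical): a
 * block-based filesystem that attaches buffer_heads to its pages can use
 * buffer_migrate_page() as its migratepage method instead of relying on
 * the writeout fallback in migrate_pages():
 *
 *	static struct address_space_operations example_blk_aops = {
 *		.readpage	= block_read_full_page,
 *		.migratepage	= buffer_migrate_page,
 *	};
 */
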
/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify the destination with either a non-NULL vma or dest >= 0.
 * Return the number of pages not migrated or an error code.
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		}
		else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}
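
/*
 * Caller sketch (illustrative; target_node and the surrounding code are
 * assumptions, not part of this file): moving a batch of isolated pages
 * to one NUMA node and putting anything unmovable back on the LRU:
 *
 *	LIST_HEAD(pagelist);
 *
 *	(isolate_lru_page(page, &pagelist) for each page to be moved)
 *
 *	if (migrate_prep() == 0)
 *		migrate_pages_to(&pagelist, NULL, target_node);
 *	putback_lru_pages(&pagelist);
 *
 * A negative return from migrate_pages_to() indicates a hard error; a
 * positive one is the number of pages that could not be moved.
 */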