/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

#include "internal.h"

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
        int ret = -EBUSY;

        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);

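                /*
                 * Recheck PageLRU under the zone lru_lock: the unlocked
                 * test above is only a hint and the page may have been
                 * taken off the LRU by someone else in the meantime.
                 */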
                spin_lock_irq(&zone->lru_lock);
                if (PageLRU(page)) {
                        ret = 0;
                        get_page(page);
                        ClearPageLRU(page);
                        if (PageActive(page))
                                del_page_from_active_list(zone, page);
                        else
                                del_page_from_inactive_list(zone, page);
                        list_add_tail(&page->lru, pagelist);
                }
                spin_unlock_irq(&zone->lru_lock);
        }
        return ret;
}

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of
 * calls to migrate_pages().
 */
int migrate_prep(void)
{
        /* Must have a swap device available for migration */
        if (nr_swap_pages <= 0)
                return -ENODEV;

        /*
         * Drain the per-cpu LRU-add caches so that their pages reach
         * the LRU lists and can be isolated. Note that pages may be
         * moved off the LRU after we have drained them. Those pages
         * will fail to migrate like other pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}

static inline void move_to_lru(struct page *page)
{
        list_del(&page->lru);
        if (PageActive(page)) {
                /*
                 * lru_cache_add_active checks that
                 * the PG_active bit is off.
                 */
                ClearPageActive(page);
                lru_cache_add_active(page);
        } else {
                lru_cache_add(page);
        }
        put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * Returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;
        int count = 0;

        list_for_each_entry_safe(page, page2, l, lru) {
                move_to_lru(page);
                count++;
        }
        return count;
}

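/*
 * Overall calling sequence, as a rough sketch only (the list name and the
 * arguments to migrate_pages_to() are made up for illustration; see the
 * real callers for details). Following the comments in this file, the
 * candidate pages are isolated first, then migrated, and any leftovers are
 * put back on the LRU:
 *
 *	LIST_HEAD(pagelist);
 *
 *	isolate_lru_page(page, &pagelist);	(for each candidate page)
 *	...
 *	if (migrate_prep() == 0)
 *		migrate_pages_to(&pagelist, vma, dest_node);
 *	putback_lru_pages(&pagelist);		(return what was not migrated)
 */
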
/*
 * Non migratable page
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
        return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

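/*
 * Note: swap_page() below is the fallback path of migrate_pages(). It is
 * used when the caller supplies no target pages and, as a last resort,
 * when a page's buffers cannot be dropped after several passes.
 */
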
/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (page_mapped(page) && mapping)
                if (try_to_unmap(page, 1) != SWAP_SUCCESS)
                        goto unlock_retry;

        if (PageDirty(page)) {
                /* Page is dirty, try to write it out here */
                switch (pageout(page, mapping)) {
                case PAGE_KEEP:
                case PAGE_ACTIVATE:
                        goto unlock_retry;

                case PAGE_SUCCESS:
                        goto retry;

                case PAGE_CLEAN:
                        ; /* try to free the page below */
                }
        }

        if (PagePrivate(page)) {
                if (!try_to_release_page(page, GFP_KERNEL) ||
                    (!mapping && page_count(page) == 1))
                        goto unlock_retry;
        }

        if (remove_mapping(mapping, page)) {
                /* Success */
                unlock_page(page);
                return 0;
        }

unlock_retry:
        unlock_page(page);

retry:
        return -EAGAIN;
}

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
                                struct page *page, int nr_refs)
{
        struct address_space *mapping = page_mapping(page);
        struct page **radix_pointer;

        /*
         * Avoid doing any of the following work if the page count
         * indicates that the page is in use or truncate has removed
         * the page.
         */
        if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
                return -EAGAIN;

        /*
         * Establish swap ptes for anonymous pages or destroy pte
         * maps for files.
         *
         * In order to re-establish file-backed mappings the fault handlers
         * will take the radix tree lock which may then be used to stop
         * processes from accessing this page until the new page is ready.
         *
         * A process accessing via a swap pte (an anonymous page) will take a
         * page_lock on the old page which will block the process until the
         * migration attempt is complete. At that time the PageSwapCache bit
         * will be examined. If the page was migrated then the PageSwapCache
         * bit will be clear and the operation to retrieve the page will be
         * retried which will find the new page in the radix tree. Then a new
         * direct mapping may be generated based on the radix tree contents.
         *
         * If the page was not migrated then the PageSwapCache bit
         * is still set and the operation may continue.
         */
        if (try_to_unmap(page, 1) == SWAP_FAIL)
                /* A vma has VM_LOCKED set -> permanent failure */
                return -EPERM;

        /*
         * Give up if we were unable to remove all mappings.
         */
        if (page_mapcount(page))
                return -EAGAIN;

        write_lock_irq(&mapping->tree_lock);

        radix_pointer = (struct page **)radix_tree_lookup_slot(
                                                &mapping->page_tree,
                                                page_index(page));

        if (!page_mapping(page) || page_count(page) != nr_refs ||
                        *radix_pointer != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page.
         *
         * Certain minimal information about a page must be available
         * in order for other subsystems to properly handle the page if they
         * find it through the radix tree before we have finished
         * copying the page.
         */
        get_page(newpage);
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }

        *radix_pointer = newpage;
        __put_page(page);
        write_unlock_irq(&mapping->tree_lock);

        return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);

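/*
 * The nr_refs argument to migrate_page_remove_references() encodes the
 * reference count expected on an otherwise idle, isolated page: one
 * reference held by the radix tree plus the one taken by
 * isolate_lru_page(), which is why migrate_page() passes 2, and one more
 * held by attached buffer heads, which is why buffer_migrate_page()
 * passes 3.
 */
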
/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
        copy_highpage(newpage, page);

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (PageActive(page))
                SetPageActive(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
                set_page_dirty(newpage);
        }

        ClearPageSwapCache(page);
        ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page->mapping = NULL;

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_remove_references(newpage, page, 2);

        if (rc)
                return rc;

        migrate_page_copy(newpage, page);

        /*
         * Remove auxiliary swap entries and replace
         * them with real ptes.
         *
         * Note that a real pte entry will allow processes that are not
         * waiting on the page lock to use the new page via the page tables
         * before the new page is unlocked.
         */
        remove_from_swap(newpage);
        return 0;
}
EXPORT_SYMBOL(migrate_page);

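/*
 * A filesystem normally hooks into migration through its address_space
 * operations. As a rough sketch (hypothetical filesystem, the method names
 * other than migratepage are made up for illustration): a filesystem whose
 * pages carry no private data can simply point its migratepage method at
 * migrate_page(), while one that attaches buffer heads and can guarantee
 * no other references to the page may use buffer_migrate_page() below:
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */
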
/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the isolated pages
 * can be moved to. If the second list is NULL then all
 * pages are swapped out instead.
 *
 * The function returns after 10 attempts or earlier if no pages
 * are movable any more because "to" has become empty
 * or because no retryable pages exist any more.
 *
 * Return: Number of pages not migrated (permanent failures plus
 * pages still awaiting retry) when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
                  struct list_head *moved, struct list_head *failed)
{
        int retry;
        int nr_failed = 0;
        int pass = 0;
        struct page *page;
        struct page *page2;
        int swapwrite = current->flags & PF_SWAPWRITE;
        int rc;

        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;

redo:
        retry = 0;

        list_for_each_entry_safe(page, page2, from, lru) {
                struct page *newpage = NULL;
                struct address_space *mapping;

                cond_resched();

                rc = 0;
                if (page_count(page) == 1)
                        /* page was freed from under us. So we are done. */
                        goto next;

                if (to && list_empty(to))
                        break;

                /*
                 * Skip locked pages during the early passes to give the
                 * functions holding the lock time to release the page.
                 * Later we use lock_page() to have a higher chance of
                 * acquiring the lock.
                 */
                rc = -EAGAIN;
                if (pass > 2)
                        lock_page(page);
                else
                        if (TestSetPageLocked(page))
                                goto next;

                /*
                 * Only wait on writeback if we have already done a pass
                 * where we may have triggered writeouts for lots of pages.
                 */
                if (pass > 0) {
                        wait_on_page_writeback(page);
                } else {
                        if (PageWriteback(page))
                                goto unlock_page;
                }

                /*
                 * Anonymous pages must have swap cache references otherwise
                 * the information contained in the page maps cannot be
                 * preserved.
                 */
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!add_to_swap(page, GFP_KERNEL)) {
                                rc = -ENOMEM;
                                goto unlock_page;
                        }
                }

                if (!to) {
                        rc = swap_page(page);
                        goto next;
                }

                newpage = lru_to_page(to);
                lock_page(newpage);

                /*
                 * Pages are properly locked and writeback is complete.
                 * Try to migrate the page.
                 */
                mapping = page_mapping(page);
                if (!mapping)
                        goto unlock_both;

                if (mapping->a_ops->migratepage) {
                        /*
                         * Most pages have a mapping and most filesystems
                         * should provide a migration function. Anonymous
                         * pages are part of swap space which also has its
                         * own migration function. This is the most common
                         * path for page migration.
                         */
                        rc = mapping->a_ops->migratepage(newpage, page);
                        goto unlock_both;
                }

                /*
                 * Default handling if a filesystem does not provide
                 * a migration function. We can only migrate clean
                 * pages so try to write out any dirty pages first.
                 */
                if (PageDirty(page)) {
                        switch (pageout(page, mapping)) {
                        case PAGE_KEEP:
                        case PAGE_ACTIVATE:
                                goto unlock_both;

                        case PAGE_SUCCESS:
                                unlock_page(newpage);
                                goto next;

                        case PAGE_CLEAN:
                                ; /* try to migrate the page below */
                        }
                }

                /*
                 * Buffers are managed in a filesystem specific way.
                 * We must have no buffers or drop them.
                 */
                if (!page_has_buffers(page) ||
                    try_to_release_page(page, GFP_KERNEL)) {
                        rc = migrate_page(newpage, page);
                        goto unlock_both;
                }

                /*
                 * On early passes with mapped pages simply
                 * retry. There may be a lock held for some
                 * buffers that may go away. Later
                 * swap them out.
                 */
                if (pass > 4) {
                        /*
                         * Persistently unable to drop buffers..... As a
                         * measure of last resort we fall back to
                         * swap_page().
                         */
                        unlock_page(newpage);
                        newpage = NULL;
                        rc = swap_page(page);
                        goto next;
                }

unlock_both:
                unlock_page(newpage);

unlock_page:
                unlock_page(page);

next:
                if (rc == -EAGAIN) {
                        retry++;
                } else if (rc) {
                        /* Permanent failure */
                        list_move(&page->lru, failed);
                        nr_failed++;
                } else {
                        if (newpage) {
                                /* Successful migration. Return page to LRU */
                                move_to_lru(newpage);
                        }
                        list_move(&page->lru, moved);
                }
        }
        if (retry && pass++ < 10)
                goto redo;

        if (!swapwrite)
                current->flags &= ~PF_SWAPWRITE;

        return nr_failed + retry;
}

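/*
 * Minimal calling sketch for migrate_pages(), for illustration only (the
 * list names are made up and error handling is omitted). The real driver
 * in this file is migrate_pages_to() at the bottom:
 *
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *
 *	migrate_pages(&pagelist, &newpages, &moved, &failed);
 *	putback_lru_pages(&moved);
 *	putback_lru_pages(&failed);
 */
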
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct buffer_head *bh, *head;
        int rc;

        if (!mapping)
                return -EAGAIN;

        if (!page_has_buffers(page))
                return migrate_page(newpage, page);

        head = page_buffers(page);

        rc = migrate_page_remove_references(newpage, page, 3);

        if (rc)
                return rc;

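        /*
         * Take a reference on each buffer and lock it so that nobody can
         * use or free the buffers while they are transferred to the new
         * page.
         */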
        bh = head;
        do {
                get_bh(bh);
                lock_buffer(bh);
                bh = bh->b_this_page;

        } while (bh != head);

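        /*
         * Transfer the list of buffers (page_private) and the page
         * reference held by the buffers from the old page to the new page.
         */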
        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        migrate_page_copy(newpage, page);

        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify the destination with either a non-NULL vma or dest >= 0.
 * Return the number of pages not migrated or an error code.
 */
int migrate_pages_to(struct list_head *pagelist,
                        struct vm_area_struct *vma, int dest)
{
        LIST_HEAD(newlist);
        LIST_HEAD(moved);
        LIST_HEAD(failed);
        int err = 0;
        unsigned long offset = 0;
        int nr_pages;
        struct page *page;
        struct list_head *p;

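        /*
         * Target pages are allocated in chunks of at most
         * MIGRATE_CHUNK_SIZE. We come back here as long as no error
         * occurred, the previous chunk was fully consumed and source
         * pages remain.
         */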
redo:
        nr_pages = 0;
        list_for_each(p, pagelist) {
                if (vma) {
                        /*
                         * The address passed to alloc_page_vma is used to
                         * generate the proper interleave behavior. We fake
                         * the address here by an increasing offset in order
                         * to get the proper distribution of pages.
                         *
                         * No decision has been made as to which page
                         * a certain old page is moved to so we cannot
                         * specify the correct address.
                         */
                        page = alloc_page_vma(GFP_HIGHUSER, vma,
                                        offset + vma->vm_start);
                        offset += PAGE_SIZE;
                }
                else
                        page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

                if (!page) {
                        err = -ENOMEM;
                        goto out;
                }
                list_add_tail(&page->lru, &newlist);
                nr_pages++;
                if (nr_pages > MIGRATE_CHUNK_SIZE)
                        break;
        }
        err = migrate_pages(pagelist, &newlist, &moved, &failed);

        putback_lru_pages(&moved);      /* Call release pages instead ?? */

        if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
                goto redo;
out:
        /* Return leftover allocated pages */
        while (!list_empty(&newlist)) {
                page = list_entry(newlist.next, struct page, lru);
                list_del(&page->lru);
                __free_page(page);
        }
        list_splice(&failed, pagelist);
        if (err < 0)
                return err;

        /* Calculate number of leftover pages */
        nr_pages = 0;
        list_for_each(p, pagelist)
                nr_pages++;
        return nr_pages;
}