KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 1 | #ifndef __LINUX_PAGEISOLATION_H |
| 2 | #define __LINUX_PAGEISOLATION_H |
| 3 | |
Minchan Kim | ee6f509 | 2012-07-31 16:43:50 -0700 | [diff] [blame] | 4 | |
/*
 * Return true if the page range starting at @page contains pages that
 * cannot be migrated off it.
 * NOTE(review): exact scan semantics live in mm/page_alloc.c — confirm there.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
/* Set the migrate type recorded for @page's pageblock to @migratetype. */
void set_pageblock_migratetype(struct page *page, int migratetype);
/*
 * Move the free pages of a whole pageblock (or of the explicit
 * [start_page, end_page] range) onto the free lists of @migratetype.
 * NOTE(review): return value presumably counts pages moved — verify in
 * mm/page_alloc.c.
 */
int move_freepages_block(struct zone *zone, struct page *page,
			 int migratetype);
int move_freepages(struct zone *zone,
		   struct page *start_page, struct page *end_page,
		   int migratetype);
| 12 | |
/*
 * Changes the migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
 * If the specified range includes migrate types other than MOVABLE or CMA,
 * this will fail with -EBUSY.
 *
 * To finally isolate all pages in the range, the caller has to free
 * all pages in the range.  test_pages_isolated() can be used to check
 * whether the range is fully isolated.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype);
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 25 | |
/*
 * Changes MIGRATE_ISOLATE back to @migratetype (historically always
 * MIGRATE_MOVABLE, hence the old comment — the type is now a parameter).
 * The target range is [start_pfn, end_pfn).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 33 | |
/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 38 | |
/*
 * Internal functions. Changes pageblock's migrate type.
 */
/* Mark @page's pageblock as MIGRATE_ISOLATE.
 * NOTE(review): return contract (0/-EBUSY?) is defined by the
 * implementation in mm/page_alloc.c — confirm there. */
int set_migratetype_isolate(struct page *page);
/* Revert @page's isolated pageblock back to @migratetype. */
void unset_migratetype_isolate(struct page *page, unsigned migratetype);
/* Allocate a target page for migrating a page out of an isolated range.
 * NOTE(review): signature matches a migrate_pages() new-page callback —
 * inferred from shape, verify against callers. */
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp);
KAMEZAWA Hiroyuki | a5d76b54 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 46 | |
| 47 | #endif |