#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H


bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
			 int migratetype);
int move_freepages(struct zone *zone,
		   struct page *start_page, struct page *end_page,
		   int migratetype);

/*
 * Changes the migrate type of pageblocks in [start_pfn, end_pfn) to
 * MIGRATE_ISOLATE.  If the specified range includes migrate types other
 * than MOVABLE or CMA, this will fail with -EBUSY.
 *
 * To isolate all pages in the range completely, the caller also has to
 * free every page in it.  test_pages_isolated() can be used to verify
 * that (see the usage sketch below).
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, bool skip_hwpoisoned_pages);

/*
 * Changes MIGRATE_ISOLATE in the target range [start_pfn, end_pfn)
 * back to the given migratetype (typically MIGRATE_MOVABLE).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);

/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages);

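/*
 * Illustrative usage sketch (not lifted from a real caller): code such as
 * memory offlining or alloc_contig_range() typically isolates a range,
 * migrates or frees the pages inside it, checks the result with
 * test_pages_isolated(), and undoes the isolation when it bails out.
 * MIGRATE_MOVABLE and skip_hwpoisoned_pages == true are example choices
 * here, and a 0-on-success / -EBUSY return convention is assumed:
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, true);
 *	if (ret)
 *		return ret;
 *
 *	[ migrate or free every page in [start_pfn, end_pfn) ]
 *
 *	ret = test_pages_isolated(start_pfn, end_pfn, true);
 *	if (ret) {
 *		undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 *		return ret;
 *	}
 */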
/*
 * Internal functions. Change a pageblock's migrate type.
 */
int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
void unset_migratetype_isolate(struct page *page, unsigned migratetype);
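/*
 * Target-page allocation callback intended for migrating pages out of an
 * isolated range; its signature is meant to match the new_page_t callback
 * that migrate_pages() expects (callers such as alloc_contig_range() and
 * memory offlining pass it for that purpose).
 */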
struct page *alloc_migrate_target(struct page *page, unsigned long private,
				  int **resultp);

#endif