KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 1 | #ifndef __LINUX_PAGEISOLATION_H |
| 2 | #define __LINUX_PAGEISOLATION_H |
| 3 | |
#ifdef CONFIG_MEMORY_ISOLATION
/* True if the pageblock containing @page currently has type MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
}
/* True if @migratetype is MIGRATE_ISOLATE. */
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
#else
/* Without CONFIG_MEMORY_ISOLATION, no pageblock can ever be isolated. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
#endif
Minchan Kim | ee6f509 | 2012-07-31 16:43:50 -0700 | [diff] [blame] | 23 | |
/*
 * Scan @count pages starting at @page in @zone for pages that cannot
 * be moved or freed; presumably returns true if one is found — see
 * the implementation in mm/page_alloc.c for the exact contract.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages);
/* Record @migratetype in the pageblock bitmap for @page's pageblock. */
void set_pageblock_migratetype(struct page *page, int migratetype);
/* Move the free pages of @page's whole pageblock onto the @migratetype free lists. */
int move_freepages_block(struct zone *zone, struct page *page,
			 int migratetype);
/* Move free pages in [start_page, end_page] onto the @migratetype free lists. */
int move_freepages(struct zone *zone,
		   struct page *start_page, struct page *end_page,
		   int migratetype);
| 32 | |
/*
 * Change the migrate type of the pageblocks covering
 * [start_pfn, end_pfn) to MIGRATE_ISOLATE.  If the range includes
 * migrate types other than MOVABLE or CMA, this fails with -EBUSY.
 *
 * To finish isolating the range, the caller must then free all pages
 * in it; test_pages_isolated() can be used to check the result.
 */
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned migratetype, bool skip_hwpoisoned_pages);
KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 45 | |
/*
 * Revert MIGRATE_ISOLATE back to @migratetype (typically
 * MIGRATE_MOVABLE) for the pageblocks covering [start_pfn, end_pfn).
 */
int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			unsigned migratetype);
KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 53 | |
/*
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
 */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages);
KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 59 | |
| 60 | /* |
Michal Nazarewicz | 0815f3d | 2012-04-03 15:06:15 +0200 | [diff] [blame] | 61 | * Internal functions. Changes pageblock's migrate type. |
KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 62 | */ |
Wen Congyang | b023f46 | 2012-12-11 16:00:45 -0800 | [diff] [blame] | 63 | int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages); |
Minchan Kim | ee6f509 | 2012-07-31 16:43:50 -0700 | [diff] [blame] | 64 | void unset_migratetype_isolate(struct page *page, unsigned migratetype); |
Minchan Kim | 723a064 | 2012-10-08 16:32:52 -0700 | [diff] [blame] | 65 | struct page *alloc_migrate_target(struct page *page, unsigned long private, |
| 66 | int **resultp); |
KAMEZAWA Hiroyuki | a5d76b5 | 2007-10-16 01:26:11 -0700 | [diff] [blame] | 67 | |
| 68 | #endif |