#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);
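/*
 * Illustrative sketch, not part of the original header: a minimal
 * new_page_t allocation callback.  'example_new_node_page' is a
 * hypothetical name, and 'private' is assumed here to carry a target
 * node id; real callers (e.g. the sys_move_pages() path) pass richer
 * state through 'private' and report per-page status via the int **
 * argument.
 */
static inline struct page *example_new_node_page(struct page *page,
						 unsigned long private,
						 int **result)
{
	/* Allocate the replacement page on the requested NUMA node. */
	return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}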

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * Balloon page migration introduces this special case, where a distinct
 * return code is used to flag a successful page migration to
 * unmap_and_move().  This approach is necessary because page migration
 * can race against the balloon deflation procedure, and in that case we
 * could introduce a nasty page leak if a successfully migrated balloon
 * page gets released concurrently with migration's unmap_and_move()
 * wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
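/*
 * Illustrative sketch, not part of the original header: how a caller in
 * the style of unmap_and_move() might fold the balloon special case
 * back into the ordinary success/failure convention.
 * 'example_handle_migratepage_rc' is a hypothetical name.
 */
static inline int example_handle_migratepage_rc(int rc)
{
	if (rc == MIGRATEPAGE_BALLOON_SUCCESS)
		/*
		 * Balloon case: the old page belongs to the balloon
		 * driver, not the LRU, so it must be released through
		 * the balloon instead of being put back; report plain
		 * success to the rest of the migration path.
		 */
		return MIGRATEPAGE_SUCCESS;

	return rc;	/* negative errno on failure, 0 on success */
}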

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode);
extern int migrate_huge_page(struct page *, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode);

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
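/*
 * Illustrative sketch, not part of the original header: typical caller
 * ordering for list-based migration.  'example_migrate_list' and 'nid'
 * are hypothetical names; 'pagelist' is assumed to have been populated
 * with isolate_lru_page() after a migrate_prep() call (which drains the
 * per-cpu LRU pagevecs so pages can be isolated), and
 * example_new_node_page() is the sketch allocator shown above.
 */
static inline int example_migrate_list(struct list_head *pagelist, int nid)
{
	int err;

	err = migrate_pages(pagelist, example_new_node_page,
			    (unsigned long)nid, false, MIGRATE_SYNC);
	if (err)
		/* Put pages that could not be migrated back on the LRU. */
		putback_lru_pages(pagelist);
	return err;
}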
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
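/*
 * Illustrative sketch, not part of the original header: a migratepage
 * method for a hugetlbfs-style mapping, built from the two helpers
 * above.  'example_hugetlb_migratepage' is a hypothetical name; the
 * real implementation lives in fs/hugetlbfs/inode.c.
 */
static inline int example_hugetlb_migratepage(struct address_space *mapping,
					      struct page *newpage,
					      struct page *page,
					      enum migrate_mode mode)
{
	int rc;

	/* Move the page cache (radix tree) slot over to the new page. */
	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc)
		return rc;

	/* Copy contents and relevant page state to the new page. */
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}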
#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */
#endif /* _LINUX_MIGRATE_H */