#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * Balloon page migration introduces a special case, where a distinct
 * return code is used to flag a successful page migration to
 * unmap_and_move().  This approach is necessary because page migration
 * can race against the balloon deflation procedure, and in such a case
 * we would introduce a nasty page leak if a successfully migrated
 * balloon page were released concurrently with migration's
 * unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
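
/*
 * Illustrative sketch only, not part of this header: a filesystem's or
 * driver's migratepage() callback is expected to return one of the codes
 * above.  The foo_* helpers below are hypothetical:
 *
 *	static int foo_migratepage(struct address_space *mapping,
 *				   struct page *newpage, struct page *page,
 *				   enum migrate_mode mode)
 *	{
 *		if (foo_page_is_busy(page))
 *			return -EAGAIN;
 *		foo_copy_private_state(newpage, page);
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 */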
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason);
extern int migrate_huge_page(struct page *, new_page_t x,
		unsigned long private, enum migrate_mode mode);
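
/*
 * Illustrative sketch only, not part of this header: a typical caller
 * isolates pages onto a private list and passes migrate_pages() an
 * allocator callback matching new_page_t.  The helper name and the use
 * of the private argument as a node id are hypothetical:
 *
 *	static struct page *new_node_page(struct page *page,
 *					  unsigned long private, int **result)
 *	{
 *		return alloc_pages_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	LIST_HEAD(pagelist);
 *	(pages are isolated onto &pagelist, then:)
 *	err = migrate_pages(&pagelist, new_node_page, target_nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */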

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, enum migrate_mode mode, int reason)
	{ return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, enum migrate_mode mode)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
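
/*
 * Illustrative sketch only, not part of this header: the NUMA hinting
 * fault path moves a misplaced page toward the faulting node roughly
 * like this (variable names are hypothetical; the return value is
 * nonzero when the page was migrated):
 *
 *	int target_nid;	(node the faulting task is running on)
 *
 *	if (target_nid != page_to_nid(page))
 *		migrated = migrate_misplaced_page(page, target_nid);
 */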
#else
static inline int migrate_misplaced_page(struct page *page, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */