#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 *
 * Balloon page migration introduces this special case, where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in such a case we could introduce a nasty
 * page leak if a successfully migrated balloon page were released concurrently
 * with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
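
/*
 * Illustrative sketch (not part of this header): a minimal ->migratepage()
 * implementation following the return convention above. A filesystem with
 * no private page state can delegate to the generic helper (or simply set
 * ->migratepage to migrate_page). migrate_page() returns MIGRATEPAGE_SUCCESS
 * (0) on success or a negative errno on failure:
 *
 *	static int example_migratepage(struct address_space *mapping,
 *				       struct page *newpage, struct page *page,
 *				       enum migrate_mode mode)
 *	{
 *		return migrate_page(mapping, newpage, page, mode);
 *	}
 */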
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode, int reason);
extern int migrate_huge_page(struct page *, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode);
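
/*
 * Illustrative sketch (not part of this header): migrating an isolated
 * list of pages to a target node, in the style of mm/mempolicy.c. The
 * new_page_t callback allocates each destination page; 'private' carries
 * the target node here. Pages left unmigrated must be put back:
 *
 *	static struct page *new_node_page(struct page *page,
 *					  unsigned long node, int **x)
 *	{
 *		return alloc_pages_exact_node(node,
 *				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 *	}
 *
 *	err = migrate_pages(&pagelist, new_node_page, nid, false,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 */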

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode, int reason) { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
#else
static inline int migrate_misplaced_page(struct page *page, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */
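
/*
 * Illustrative sketch (not part of this header): with CONFIG_NUMA_BALANCING
 * enabled, a NUMA hinting fault handler can try to pull a misplaced page
 * toward the faulting node; migrate_misplaced_page() returns nonzero when
 * the page was successfully queued for migration. The call pattern and the
 * surrounding names below are hypothetical:
 *
 *	int target_nid = numa_node_id();
 *
 *	if (!migrate_ratelimited(target_nid) &&
 *	    migrate_misplaced_page(page, target_nid))
 *		nid = target_nid;	// record the page's new home node
 */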

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */