#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *page, unsigned long private,
				int **reason);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 *
 * The balloon page migration introduces this special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and in such a case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released concurrently
 * with migration's unmap_and_move() wrap-up steps.  (An illustrative sketch
 * follows the definitions below.)
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
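/*
 * Illustrative sketch only, not code from this file: a wrap-up path such as
 * unmap_and_move() is expected to fold the balloon-specific success code back
 * into plain success once the balloon-side cleanup is done, so that callers
 * of migrate_pages() only ever see MIGRATEPAGE_SUCCESS or a negative errno:
 *
 *	rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
 *	if (rc == MIGRATEPAGE_BALLOON_SUCCESS)
 *		rc = MIGRATEPAGE_SUCCESS;
 */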
enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};
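/*
 * Sketch of how the reason code is consumed (illustrative; the tracepoint
 * itself lives in mm/migrate.c, not in this header): migrate_pages() forwards
 * its reason argument to the mm_migrate_pages tracepoint, so traces can tell
 * apart what triggered a given migration:
 *
 *	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
 */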

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);

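/*
 * Usage sketch, assuming a caller that has already isolated its pages onto a
 * private list (the callback and variable names here are hypothetical).
 * migrate_pages() returns a negative errno or the number of pages that could
 * not be migrated; any leftovers must be handed back via
 * putback_movable_pages():
 *
 *	static struct page *new_page(struct page *page, unsigned long private,
 *				     int **result)
 *	{
 *		return alloc_page(GFP_HIGHUSER_MOVABLE);
 *	}
 *
 *	err = migrate_pages(&pagelist, new_page, NULL, 0,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */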
extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
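/*
 * Sketch of a minimal address_space_operations.migratepage() implementation
 * built from the two helpers above (illustrative; compare migrate_page() in
 * mm/migrate.c): move the mapping's radix-tree slot, then copy the contents
 * and flags over to the new page:
 *
 *	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 *	if (rc != MIGRATEPAGE_SUCCESS)
 *		return rc;
 *	migrate_page_copy(newpage, page);
 *	return MIGRATEPAGE_SUCCESS;
 */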
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
		struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
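/*
 * Illustrative sketch, assuming a NUMA hinting fault handler in the spirit of
 * do_numa_page() (names are the caller's, not declarations from this header):
 * once a target node is chosen, the page is pushed toward it, and a failed
 * attempt simply leaves the page where it is:
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 */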
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
{
}
static inline int migrate_misplaced_page(struct page *page,
		struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
		struct vm_area_struct *vma,
		pmd_t *pmd, pmd_t entry,
		unsigned long address,
		struct page *page, int node);
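/*
 * Illustrative sketch, assuming a transparent-hugepage NUMA fault path in the
 * spirit of do_huge_pmd_numa_page() (the variables belong to that caller):
 *
 *	migrated = migrate_misplaced_transhuge_page(mm, vma, pmdp, pmd,
 *						    addr, page, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 */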
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
		struct vm_area_struct *vma,
		pmd_t *pmd, pmd_t entry,
		unsigned long address,
		struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */