/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern char *migrate_reason_names[MR_TYPES];

static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

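/*
 * Example (illustrative only, not part of the kernel API): adapting
 * new_page_nodemask() to the new_page_t callback signature used by
 * migrate_pages(). The wrapper name is hypothetical, and it assumes the
 * target node id is passed through @private and that a NULL nodemask is
 * acceptable (the allocator then falls back to the default node mask).
 *
 *	static struct page *my_new_page(struct page *page, unsigned long private)
 *	{
 *		return new_page_nodemask(page, (int)private, NULL);
 *	}
 */
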
#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count);
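
/*
 * Example (illustrative sketch only): migrating a privately isolated list
 * of pages to a target node. my_new_page() is the hypothetical new_page_t
 * wrapper sketched above; pages must have been isolated (e.g. with
 * isolate_lru_page()) before being placed on @pagelist. A NULL free_page_t
 * makes migrate_pages() fall back to put_page() for unused destinations.
 *
 *	ret = migrate_pages(&pagelist, my_new_page, NULL,
 *			    (unsigned long)target_nid, MIGRATE_SYNC,
 *			    MR_SYSCALL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */
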
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

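/*
 * Example (illustrative, hedged): a driver that wants compaction to be
 * able to migrate its pages tags them with __SetPageMovable() against a
 * mapping whose address_space_operations provide isolate_page, migratepage
 * and putback_page. my_page and my_mapping are hypothetical; the page must
 * be locked when it is marked movable.
 *
 *	lock_page(my_page);
 *	__SetPageMovable(my_page, &my_mapping);
 *	unlock_page(my_page);
 */
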
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
Peter Zijlstra7039e1d2012-10-25 14:16:34 +0200158
Jérôme Glisse8763cb42017-09-08 16:12:09 -0700159
160#ifdef CONFIG_MIGRATION
161
/*
 * Watch out for 32-bit PAE architectures, where an unsigned long might
 * not have enough bits to store both the full physical address and the
 * flags below. So far there is enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_DEVICE	(1UL << 4)
#define MIGRATE_PFN_ERROR	(1UL << 5)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

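/*
 * Example (illustrative only): a source entry for a migratable, writable
 * page can be encoded and decoded like this:
 *
 *	unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
 *			     MIGRATE_PFN_MIGRATE | MIGRATE_PFN_WRITE;
 *	struct page *p = migrate_pfn_to_page(mpfn);
 *
 * where p is the original page again, since MIGRATE_PFN_VALID is set by
 * migrate_pfn().
 */
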
/*
 * struct migrate_vma_ops - migrate operation callback
 *
 * @alloc_and_copy: alloc destination memory and copy source memory to it
 * @finalize_and_map: allow caller to map the successfully migrated pages
 *
 *
 * The alloc_and_copy() callback happens once all source pages have been locked,
 * unmapped and checked (checked whether pinned or not). All pages that can be
 * migrated will have an entry in the src array set with the pfn value of the
 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set (other
 * flags might be set but should be ignored by the callback).
 *
 * The alloc_and_copy() callback can then allocate destination memory and copy
 * source memory to it for all those entries (i.e. those with the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set). Once these are
 * allocated and copied, the callback must update each corresponding entry in
 * the dst array with the pfn value of the destination page and with the
 * MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set (destination pages must
 * have their struct pages locked, via lock_page()).
 *
 * At this point the alloc_and_copy() callback is done and returns.
 *
 * Note that the callback does not have to migrate all the pages that are
 * marked with the MIGRATE_PFN_MIGRATE flag in the src array, unless this is a
 * migration from device memory to system memory (i.e. the MIGRATE_PFN_DEVICE
 * flag is also set in the src array entry). If the device driver cannot
 * migrate a device page back to system memory, then it must set the
 * corresponding dst array entry to MIGRATE_PFN_ERROR. This will trigger a
 * SIGBUS if the CPU tries to access any of the virtual addresses originally
 * backed by this page. Because a SIGBUS is such a severe result for the
 * userspace process, the device driver should avoid setting MIGRATE_PFN_ERROR
 * unless it is really in an unrecoverable state.
 *
 * For empty entries inside the CPU page table (i.e. pte_none() or pmd_none()
 * is true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding
 * src array entry, thus allowing a device driver to allocate device memory
 * for those unbacked virtual addresses. For this the device driver simply has
 * to allocate device memory and properly set the destination entry as for a
 * regular migration. Note that this can still fail, and therefore the device
 * driver must check inside its finalize_and_map() callback whether the
 * migration was successful for those entries, just as for a regular
 * migration.
 *
 * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
 * OR BAD THINGS WILL HAPPEN !
 *
 *
 * The finalize_and_map() callback happens after struct page migration from
 * source to destination (destination struct pages are the struct pages for the
 * memory allocated by the alloc_and_copy() callback). Migration can fail, and
 * thus the finalize_and_map() callback allows the driver to inspect which
 * pages were successfully migrated, and which were not. Successfully migrated
 * pages will have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
 *
 * It is safe to update the device page table from within the
 * finalize_and_map() callback because both the destination and source pages
 * are still locked, and the mmap_sem is held in read mode (hence no one can
 * unmap the range being migrated).
 *
 * Once the callback is done cleaning up things and updating its page table
 * (if it chose to do so, this is not an obligation) then it returns. At this
 * point, the HMM core will finish up the final steps, and the migration is
 * complete.
 *
 * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
 * ENTRIES OR BAD THINGS WILL HAPPEN !
 */
struct migrate_vma_ops {
	void (*alloc_and_copy)(struct vm_area_struct *vma,
			       const unsigned long *src,
			       unsigned long *dst,
			       unsigned long start,
			       unsigned long end,
			       void *private);
	void (*finalize_and_map)(struct vm_area_struct *vma,
				 const unsigned long *src,
				 const unsigned long *dst,
				 unsigned long start,
				 unsigned long end,
				 void *private);
};

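/*
 * Example (illustrative sketch only, not a definitive implementation): a
 * device driver wiring up both callbacks and invoking migrate_vma(). All
 * my_* names are hypothetical helpers a real driver would provide.
 *
 *	static void my_alloc_and_copy(struct vm_area_struct *vma,
 *				      const unsigned long *src,
 *				      unsigned long *dst,
 *				      unsigned long start,
 *				      unsigned long end,
 *				      void *private)
 *	{
 *		unsigned long addr;
 *		unsigned long i;
 *
 *		for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 *			struct page *spage = migrate_pfn_to_page(src[i]);
 *			struct page *dpage;
 *
 *			if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *				continue;
 *			dpage = my_alloc_device_page(private, addr);
 *			if (!dpage)
 *				continue;
 *			lock_page(dpage);
 *			if (spage)
 *				my_copy_to_device(dpage, spage);
 *			dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *				 MIGRATE_PFN_LOCKED;
 *		}
 *	}
 *
 *	static void my_finalize_and_map(struct vm_area_struct *vma,
 *					const unsigned long *src,
 *					const unsigned long *dst,
 *					unsigned long start,
 *					unsigned long end,
 *					void *private)
 *	{
 *		unsigned long npages = (end - start) >> PAGE_SHIFT;
 *		unsigned long i;
 *
 *		for (i = 0; i < npages; i++)
 *			if (src[i] & MIGRATE_PFN_MIGRATE)
 *				my_map_device_page(private, start, i, dst[i]);
 *	}
 *
 *	static const struct migrate_vma_ops my_migrate_ops = {
 *		.alloc_and_copy		= my_alloc_and_copy,
 *		.finalize_and_map	= my_finalize_and_map,
 *	};
 *
 *	ret = migrate_vma(&my_migrate_ops, vma, start, end, src, dst, private);
 */
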
#if defined(CONFIG_MIGRATE_VMA_HELPER)
int migrate_vma(const struct migrate_vma_ops *ops,
		struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end,
		unsigned long *src,
		unsigned long *dst,
		void *private);
#else
static inline int migrate_vma(const struct migrate_vma_ops *ops,
			      struct vm_area_struct *vma,
			      unsigned long start,
			      unsigned long end,
			      unsigned long *src,
			      unsigned long *dst,
			      void *private)
{
	return -EINVAL;
}
#endif /* CONFIG_MIGRATE_VMA_HELPER */

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */