blob: e10a90a93b5d25a53fd12b5e623e40ffbc285bf3 [file] [log] [blame]
Christoph Lameterb20a3502006-03-22 00:09:12 -08001#ifndef _LINUX_MIGRATE_H
2#define _LINUX_MIGRATE_H
3
Christoph Lameterb20a3502006-03-22 00:09:12 -08004#include <linux/mm.h>
Christoph Lameter906e0be2007-05-06 14:50:20 -07005#include <linux/mempolicy.h>
6#include <linux/pagemap.h>
Christoph Lameterb20a3502006-03-22 00:09:12 -08007
/*
 * Allocation callback for migrate_pages(): given the page to be migrated
 * and the caller's opaque @private cookie, return the newly allocated
 * destination page.
 * NOTE(review): the int ** argument looks like an output/result pointer —
 * confirm against callers.
 */
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
Christoph Lameter95a402c2006-06-23 02:03:53 -07009
Christoph Lameter906e0be2007-05-06 14:50:20 -070010#ifdef CONFIG_MIGRATION
Christoph Lameter0dc952d2007-03-05 00:30:33 -080011/* Check if a vma is migratable */
12static inline int vma_migratable(struct vm_area_struct *vma)
13{
14 if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
15 return 0;
Christoph Lameter906e0be2007-05-06 14:50:20 -070016 /*
17 * Migration allocates pages in the highest zone. If we cannot
18 * do so then migration (at least from node to node) is not
19 * possible.
20 */
21 if (vma->vm_file &&
22 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
23 < policy_zone)
24 return 0;
Christoph Lameter0dc952d2007-03-05 00:30:33 -080025 return 1;
26}
27
/* Take page @p off its LRU list and queue it on @pagelist for migration. */
extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
/* Return the pages remaining on list @l to the LRU. */
extern int putback_lru_pages(struct list_head *l);
/* Migrate one page's data and mapping state to a new page in @mapping. */
extern int migrate_page(struct address_space *,
		struct page *, struct page *);
/*
 * Migrate the pages on list @l; @x allocates each destination page and
 * is passed the opaque private value as its second argument.
 */
extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long);

/* migrate_page method that always fails, for mappings that cannot migrate. */
extern int fail_migrate_page(struct address_space *,
		struct page *, struct page *);

/* One-time preparation before a batch of migrations. NOTE(review): confirm
 * exact semantics (presumably drains per-cpu state) against the definition. */
extern int migrate_prep(void);
/* Migrate the vmas of @mm from the nodes in @from to the nodes in @to. */
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
Christoph Lameterb20a3502006-03-22 00:09:12 -080041#else
/* Without CONFIG_MIGRATION no vma is migratable. */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	return 0;
}
Christoph Lameterb20a3502006-03-22 00:09:12 -080044
/* Migration disabled: isolating pages for migration is unsupported. */
static inline int isolate_lru_page(struct page *p, struct list_head *list)
{
	return -ENOSYS;
}
/* Migration disabled: nothing was isolated, so nothing to put back. */
static inline int putback_lru_pages(struct list_head *l)
{
	return 0;
}
Christoph Lameter95a402c2006-06-23 02:03:53 -070048static inline int migrate_pages(struct list_head *l, new_page_t x,
49 unsigned long private) { return -ENOSYS; }
Christoph Lameterb20a3502006-03-22 00:09:12 -080050
/*
 * Migration disabled: report success with no pages moved.
 * NOTE(review): no CONFIG_MIGRATION counterpart is declared in this
 * header — this stub may be a leftover; check for remaining callers.
 */
static inline int migrate_pages_to(struct list_head *pagelist,
				   struct vm_area_struct *vma, int dest)
{
	return 0;
}
53
/* Migration disabled: there is nothing to prepare. */
static inline int migrate_prep(void)
{
	return -ENOSYS;
}
55
Christoph Lameter7b2259b2006-06-25 05:46:48 -070056static inline int migrate_vmas(struct mm_struct *mm,
57 const nodemask_t *from, const nodemask_t *to,
58 unsigned long flags)
59{
60 return -ENOSYS;
61}
62
/*
 * Possible settings for the migrate_page() method in struct
 * address_space_operations: with migration disabled the methods are
 * simply absent (NULL).
 */
#define migrate_page NULL
#define fail_migrate_page NULL
66
67#endif /* CONFIG_MIGRATION */
68#endif /* _LINUX_MIGRATE_H */