#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed that no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;  /* Root of this anon_vma tree */
        struct mutex mutex;     /* Serialize access to vma list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma or its page tables will exist for
         * the duration of the operation.  A caller that takes the
         * reference is responsible for freeing the anon_vma if it
         * turns out to be the last user on release.
         */
        atomic_t refcount;

        /*
         * NOTE: the LSB of head.next is set by mm_take_all_locks()
         * _after_ taking the above mutex.  So the head must only be
         * read/written after taking the above mutex, to be sure to
         * see a valid next pointer.  The LSB itself is serialized by
         * a system-wide lock only visible to mm_take_all_locks()
         * (mm_all_locks_mutex).
         */
        struct list_head head;  /* Chain of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes.  Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct list_head same_anon_vma; /* locked by anon_vma->mutex */
};
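
/*
 * Illustrative sketch (not a real mm/ function): a reverse-map scan
 * finds every vma that might map a page by walking the anon_vma's
 * chain under its root mutex.  "process_vma" here is a hypothetical
 * callback standing in for what mm/rmap.c really does:
 *
 *      struct anon_vma_chain *avc;
 *
 *      mutex_lock(&anon_vma->root->mutex);
 *      list_for_each_entry(avc, &anon_vma->head, same_anon_vma)
 *              process_vma(avc->vma);
 *      mutex_unlock(&anon_vma->root->mutex);
 */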

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}
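
/*
 * Usage sketch (hypothetical caller): the refcount lets code keep an
 * anon_vma alive after dropping the lock that normally pins it, as
 * the comment in struct anon_vma describes:
 *
 *      get_anon_vma(anon_vma);
 *      mutex_unlock(&anon_vma->root->mutex);
 *      ...work without the mutex; the anon_vma cannot be freed...
 *      put_anon_vma(anon_vma);
 *
 * put_anon_vma() frees the structure if this was the last reference.
 */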

static inline struct anon_vma *page_anon_vma(struct page *page)
{
        if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
                                            PAGE_MAPPING_ANON)
                return NULL;
        return page_rmapping(page);
}
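
/*
 * For reference (see <linux/mm.h>): an anonymous page stores its
 * anon_vma pointer in page->mapping with the PAGE_MAPPING_ANON low
 * bit set, roughly:
 *
 *      page->mapping = (struct address_space *)
 *                      ((void *)anon_vma + PAGE_MAPPING_ANON);
 *
 * which is why page_anon_vma() tests the flag bits before letting
 * page_rmapping() strip them off.
 */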

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma)
                mutex_lock(&anon_vma->root->mutex);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma)
                mutex_unlock(&anon_vma->root->mutex);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
        mutex_lock(&anon_vma->root->mutex);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
        mutex_unlock(&anon_vma->root->mutex);
}
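
/*
 * All anon_vmas in a fork tree share their root's mutex, so locking
 * any one of them serializes against the whole tree.  A typical
 * (hypothetical) caller pairs the helpers around list manipulation:
 *
 *      anon_vma_lock(anon_vma);
 *      ... add to or walk anon_vma->head ...
 *      anon_vma_unlock(anon_vma);
 */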

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_moveto_tail(struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON(vma->anon_vma != next->anon_vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing the pte of a page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}
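
/*
 * Example (sketch): fork's pte-copy loop in mm/memory.c keeps the
 * mapcount in step with the new page reference roughly like this:
 *
 *      get_page(page);
 *      page_dup_rmap(page);
 */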

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
                        unsigned long address, unsigned int *mapcount,
                        unsigned long *vm_flags);

enum ttu_flags {
        TTU_UNMAP = 0,                  /* unmap mode */
        TTU_MIGRATION = 1,              /* migration mode */
        TTU_MUNLOCK = 2,                /* munlock mode */
        TTU_ACTION_MASK = 0xff,

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
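
/*
 * Example (sketch): a ttu_flags value combines one action with
 * modifier bits; memory failure handling, for instance, might use
 *
 *      try_to_unmap(page, TTU_UNMAP | TTU_IGNORE_MLOCK |
 *                         TTU_IGNORE_ACCESS);
 *
 * and TTU_ACTION(flags) recovers just the action part.
 */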

bool is_vma_temporary_stack(struct vm_area_struct *vma);

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
                     unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap the empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                            unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}
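
/*
 * Usage sketch (hypothetical caller): on success the returned pte is
 * mapped and *ptlp is held, so the caller must unmap and unlock:
 *
 *      spinlock_t *ptl;
 *      pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *
 *      if (pte) {
 *              ... inspect or modify the pte ...
 *              pte_unmap_unlock(pte, ptl);
 *      }
 */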

/*
 * Used by swapoff to help locate where a page is expected in a vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings (and, since clean PTEs should
 * also be read-only, write-protects them too).
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
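
/*
 * Usage sketch: the writeback path calls this via
 * clear_page_dirty_for_io(); if any PTEs were cleaned, the page
 * itself must be redirtied so the data is not lost:
 *
 *      if (page_mkclean(page))
 *              set_page_dirty(page);
 */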

/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used
 * more widely later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
              struct vm_area_struct *, unsigned long, void *), void *arg);
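
/*
 * Sketch of an rmap_walk() callback (hypothetical; the real user is
 * the migration-pte remover in mm/migrate.c).  Returning SWAP_AGAIN
 * keeps the walk going:
 *
 *      static int example_rmap_one(struct page *page,
 *                      struct vm_area_struct *vma,
 *                      unsigned long address, void *arg)
 *      {
 *              ... handle this one mapping of page ...
 *              return SWAP_AGAIN;
 *      }
 *
 *      rmap_walk(page, example_rmap_one, NULL);
 */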

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3
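
/*
 * Example (sketch): mm/vmscan.c dispatches on these return values
 * roughly as follows when paging out:
 *
 *      switch (try_to_unmap(page, TTU_UNMAP)) {
 *      case SWAP_FAIL:
 *              goto activate_locked;
 *      case SWAP_AGAIN:
 *              goto keep_locked;
 *      case SWAP_MLOCK:
 *              goto cull_mlocked;
 *      case SWAP_SUCCESS:
 *              break;
 *      }
 *
 * after which a SWAP_SUCCESS page is unmapped everywhere and can be
 * reclaimed.
 */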

#endif  /* _LINUX_RMAP_H */