#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};
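
/*
 * Illustrative sketch, not part of the original header: in this era
 * an anonymous page's ->mapping field encodes its anon_vma pointer
 * with the PAGE_MAPPING_ANON low bit set (see mm/rmap.c), so the
 * lookup looks roughly like:
 *
 *	unsigned long mapping = (unsigned long)page->mapping;
 *	struct anon_vma *anon_vma = NULL;
 *
 *	if (mapping & PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
 */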

#ifdef CONFIG_MMU

extern struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}
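
/*
 * Illustrative sketch, not part of the original header: any walk of
 * the vma list must hold anon_vma->lock. Assuming this era's
 * anon_vma_node linkage in vm_area_struct, a traversal looks
 * roughly like:
 *
 *	struct vm_area_struct *vma;
 *
 *	spin_lock(&anon_vma->lock);
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
 *		... inspect vma ...
 *	}
 *	spin_unlock(&anon_vma->lock);
 */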

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
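
/*
 * Illustrative sketch, not part of the original header: the anonymous
 * fault path calls anon_vma_prepare() to attach an anon_vma before it
 * installs the first anonymous pte in a vma, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 */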

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, struct vm_area_struct *);
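
/*
 * Illustrative sketch, not part of the original header: a fault
 * handler mapping a freshly allocated anonymous page adds the rmap
 * entry alongside the pte setup, roughly:
 *
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 */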

#ifdef CONFIG_DEBUG_VM
void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
#else
static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
{
	atomic_inc(&page->_mapcount);
}
#endif
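
/*
 * Illustrative sketch, not part of the original header: fork's
 * pte-copying path duplicates an existing mapping rather than adding
 * a new one, roughly (see copy_one_pte() in mm/memory.c):
 *
 *	get_page(page);
 *	page_dup_rmap(page, vma, addr);
 */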

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
int try_to_unmap(struct page *, int ignore_refs);
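
/*
 * Illustrative sketch, not part of the original header: vmscan
 * branches on the SWAP_* return values defined at the bottom of this
 * file; on SWAP_SUCCESS the page is fully unmapped and can be freed:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;
 *	}
 */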

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
			  unsigned long, spinlock_t **, int);
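
/*
 * Illustrative sketch, not part of the original header: on success
 * page_check_address() returns a mapped pte with its page table lock
 * held, which the caller must release with pte_unmap_unlock():
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *
 *	if (pte) {
 *		... examine or modify the pte under ptl ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */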

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
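
/*
 * Illustrative sketch, not part of the original header: writeback
 * uses page_mkclean() when clearing a page's dirty bit for I/O; if
 * any pte was dirty the page is re-dirtied, roughly (see
 * clear_page_dirty_for_io() in mm/page-writeback.c):
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */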

#else	/* !CONFIG_MMU */

#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2

#endif	/* _LINUX_RMAP_H */