#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};
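
/*
 * Illustrative sketch, not part of this header: an anonymous page
 * finds its anon_vma through page->mapping, with the low bit
 * (PAGE_MAPPING_ANON) tagging the pointer so it can be distinguished
 * from a file page's address_space. Roughly:
 *
 *	struct anon_vma *anon_vma = vma->anon_vma;
 *
 *	page->mapping = (struct address_space *)
 *		((unsigned long)anon_vma | PAGE_MAPPING_ANON);
 *	page->index = linear_page_index(vma, address);
 *
 * mm/rmap.c performs this setup in page_add_anon_rmap() and
 * page_add_new_anon_rmap(); the snippet above only sketches the
 * encoding.
 */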

#ifdef CONFIG_MMU

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}
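
/*
 * Sketch of the intended locking pattern (illustrative only): code
 * that links, unlinks or walks the vma list in mm/mmap.c and
 * mm/rmap.c brackets the list manipulation with this lock pair:
 *
 *	anon_vma_lock(vma);
 *	__anon_vma_link(vma);	(or other surgery on anon_vma->head)
 *	anon_vma_unlock(vma);
 *
 * The NULL check inside the helpers makes them safe on vmas that have
 * never faulted in anonymous memory and so have no anon_vma yet.
 */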

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
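
/*
 * A pte mapping this page is being duplicated, e.g. by fork's
 * pte-copying path in mm/memory.c: the new pte maps the same page,
 * so only the mapcount needs bumping.
 */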
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
int try_to_unmap(struct page *, int ignore_refs);
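
/*
 * Illustrative sketch (assumed call-site shape, not copied from
 * mm/vmscan.c): reclaim asks whether a page has been referenced since
 * the last scan before deciding to unmap it:
 *
 *	unsigned long vm_flags;
 *
 *	if (page_referenced(page, 1, sc->mem_cgroup, &vm_flags))
 *		goto activate_locked;	(hot page: keep it on the LRU)
 *
 * On return, *vm_flags collects the vm_flags (e.g. VM_EXEC) of the
 * vmas whose ptes referenced the page; a non-NULL mem_cgroup limits
 * the check to mappings charged to that cgroup.
 */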

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
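
/*
 * Illustrative note: writeback calls page_mkclean() before writing a
 * dirty page out, so a task touching a shared mapping afterwards
 * re-faults and re-dirties the page. A sketch of the call-site shape
 * (clear_page_dirty_for_io() in mm/page-writeback.c is the real one):
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	(a pte was dirty: keep page dirty)
 *
 * which keeps dirty accounting consistent with the now write-protected
 * ptes.
 */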

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
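
/*
 * Illustrative sketch (assumed call-site shape, not copied from
 * mm/vmscan.c): reclaim dispatches on the try_to_unmap() result:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;	(could not unmap: keep the page)
 *	case SWAP_AGAIN:
 *		goto keep_locked;	(transient failure: retry later)
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;	(an mlocked vma maps the page)
 *	case SWAP_SUCCESS:
 *		break;			(all ptes gone: page can go)
 *	}
 *
 * try_to_munlock() reuses these codes: SWAP_MLOCK means some other
 * vma still holds the page mlocked.
 */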

#endif	/* _LINUX_RMAP_H */