#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
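/*
 * ksm_madvise() backs madvise(MADV_MERGEABLE) and madvise(MADV_UNMERGEABLE):
 * it sets or clears VM_MERGEABLE in *vm_flags for the vma, and on the first
 * MADV_MERGEABLE request registers the mm with ksmd via __ksm_enter().
 * (Summary comment only; the implementation lives in mm/ksm.c.)
 */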
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

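/*
 * fork() propagates KSM registration: if the parent mm was made mergeable
 * (MMF_VM_MERGEABLE set), register the child mm with ksmd as well.
 */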
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

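/*
 * Called when an mm is being torn down: if it was ever registered with ksmd
 * (MMF_VM_MERGEABLE set), let KSM release its tracking state for this mm.
 */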
static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

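/*
 * For a KSM page, page->mapping does not point at an anon_vma: the low
 * PAGE_MAPPING_ANON | PAGE_MAPPING_KSM bits tag the pointer, and the rest of
 * it is the page's stable_node in KSM's stable tree.  page_rmapping() masks
 * off those flag bits, so it recovers the stable_node pointer here.
 */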
static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

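/*
 * Rough sketch of how a swap-in path is expected to use this (illustrative
 * only, not the exact do_swap_page() code; it assumes a NULL return when a
 * needed copy cannot be allocated):
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	... continue with page, which is either the original or a fresh copy ...
 */

/*
 * KSM-specific rmap helpers: the generic page_referenced(), try_to_unmap(),
 * rmap_walk() and page migration paths call these variants when they meet
 * a PageKsm page.
 */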
int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

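/*
 * When CONFIG_KSM is not set, provide no-op stubs so that core mm code can
 * call these hooks unconditionally.
 */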
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

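/*
 * The remaining stubs are only provided for CONFIG_MMU builds.
 */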
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */