#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
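/*
 * Illustrative sketch, not part of this header: merging is opt-in per memory
 * area, so a userspace process typically marks a region with madvise() and
 * the ksmd daemon is enabled via /sys/kernel/mm/ksm/run, roughly:
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *
 * That madvise() call reaches ksm_madvise() below, which sets VM_MERGEABLE
 * on the vma and registers the mm with KSM through __ksm_enter().
 */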

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmstat.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
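
/*
 * A minimal sketch of how these hooks are meant to be wired up; the actual
 * call sites live in the fork and mm-teardown paths (e.g. kernel/fork.c),
 * shown here only for illustration:
 *
 *	// on fork: a child of a KSM-registered mm is registered too
 *	retval = ksm_fork(mm, oldmm);
 *	if (retval)
 *		goto out;
 *	...
 *	// on mm teardown: drop the mm from ksmd's scan list
 *	ksm_exit(mm);
 *
 * Both are cheap no-ops unless MMF_VM_MERGEABLE has been set on the mm.
 */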

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
}
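
/*
 * Sketch of the encoding this test relies on (illustrative only): an
 * ordinary anonymous page stores its anon_vma pointer in page->mapping
 * with the low PAGE_MAPPING_ANON bit set, so a KSM page, which has no
 * anon_vma, is the one whose mapping is exactly that bare flag bit:
 *
 *	ordinary anon page:  page->mapping == (void *)anon_vma + PAGE_MAPPING_ANON
 *	KSM shared page:     page->mapping == (void *)PAGE_MAPPING_ANON
 *
 * Callers such as the rmap and swap code can therefore use PageKsm() to
 * special-case these write-protected shared pages.
 */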

/*
 * But we have to avoid the checking which page_add_anon_rmap() performs.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		page->mapping = (void *) PAGE_MAPPING_ANON;
		__inc_zone_page_state(page, NR_ANON_PAGES);
	}
}
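
/*
 * Illustrative sketch, assuming the merge path in mm/ksm.c: when KSM
 * replaces a process's private page with the shared KSM page (kpage is a
 * hypothetical name here), it maps the shared page with this helper rather
 * than page_add_anon_rmap(), since there is no anon_vma to check against:
 *
 *	page_add_ksm_rmap(kpage);
 *	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
 *
 * The first mapper initializes page->mapping and bumps NR_ANON_PAGES
 * (because _mapcount starts at -1, atomic_inc_and_test() is true only for
 * the first mapping); later mappers just raise _mapcount.
 */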
#else  /* !CONFIG_KSM */

static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

/* No stub required for page_add_ksm_rmap(page) */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */