blob: 1401a313fa77378893f28423649fbfaf109e2cb4 [file] [log] [blame]
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmstat.h>

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

21static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
22{
23 if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
24 return __ksm_enter(mm);
25 return 0;
26}
Andrea Arcangeli1c2fb7a2009-09-21 17:02:22 -070028static inline void ksm_exit(struct mm_struct *mm)
Hugh Dickinsf8af4da2009-09-21 17:01:57 -070029{
30 if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
Andrea Arcangeli1c2fb7a2009-09-21 17:02:22 -070031 __ksm_exit(mm);
Hugh Dickinsf8af4da2009-09-21 17:01:57 -070032}
34/*
35 * A KSM page is one of those write-protected "shared pages" or "merged pages"
36 * which KSM maps into multiple mms, wherever identical anonymous page content
37 * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
38 */
39static inline int PageKsm(struct page *page)
40{
Hugh Dickins3ca7b3c2009-12-14 17:58:57 -080041 return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
42 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
Hugh Dickins9a840892009-09-21 17:02:01 -070043}
45/*
46 * But we have to avoid the checking which page_add_anon_rmap() performs.
47 */
48static inline void page_add_ksm_rmap(struct page *page)
49{
50 if (atomic_inc_and_test(&page->_mapcount)) {
Hugh Dickins3ca7b3c2009-12-14 17:58:57 -080051 page->mapping = (void *) (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
Hugh Dickins9a840892009-09-21 17:02:01 -070052 __inc_zone_page_state(page, NR_ANON_PAGES);
53 }
54}
#else  /* !CONFIG_KSM */

/* With KSM compiled out, madvise(MADV_MERGEABLE/UNMERGEABLE) is a no-op. */
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}
/* Nothing to inherit at fork() when KSM is compiled out. */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}
/* Nothing to tear down when KSM is compiled out. */
static inline void ksm_exit(struct mm_struct *mm)
{
}
/* No page can be a KSM page without CONFIG_KSM. */
static inline int PageKsm(struct page *page)
{
	return 0;
}

/* No stub required for page_add_ksm_rmap(page) */
#endif /* !CONFIG_KSM */

#endif