/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
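
/*
 * Userspace opts a region into merging with madvise(2); ksm_madvise()
 * below implements the MADV_MERGEABLE and MADV_UNMERGEABLE hints.  As a
 * rough userspace sketch (error handling elided):
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *
 * ksmd then scans such areas and replaces identical pages with a single
 * write-protected page, which is broken out again on write fault
 * (copy-on-write).  Scanning is enabled via /sys/kernel/mm/ksm/run.
 */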

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

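/*
 * ksm_fork() and ksm_exit() tie KSM registration to the lifetime of an
 * mm: a forked child of a registered parent is registered too, and an
 * mm is unregistered when it is torn down.  As a rough sketch, the
 * fork-side caller (dup_mmap()) does something like:
 *
 *	retval = ksm_fork(mm, oldmm);
 *	if (retval)
 *		goto out;
 */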
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

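/*
 * KSM distinguishes its pages by tagging page->mapping: instead of an
 * anon_vma pointer, a KSM page's mapping field holds its stable_node
 * pointer with PAGE_MAPPING_KSM set in the low bits.  PageKsm() tests
 * the tag and page_rmapping() masks it off, which is what makes the two
 * helpers below work.  For illustration:
 *
 *	mapping = (unsigned long)page->mapping;
 *	is_ksm  = (mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM;
 *	node    = (struct stable_node *)(mapping & ~PAGE_MAPPING_FLAGS);
 */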
static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
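
/*
 * A do_swap_page()-style caller therefore does, roughly:
 *
 *	page = ksm_might_need_to_copy(page, vma, vmf->address);
 *	if (unlikely(!page))
 *		return VM_FAULT_OOM;
 *
 * and gets back either the original page, a fresh copy, or NULL when
 * the copy could not be allocated.
 */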

/*
 * rmap_walk_ksm() lets reverse-map walks visit each anon_vma under which
 * a KSM page is mapped, via the page's stable_node; ksm_migrate_page()
 * is called by the page migration code so that the stable-tree node
 * follows the page to its new location.
 */
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline void rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */