#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
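/*
 * Bit numbers within transparent_hugepage_flags; each bit enables one
 * THP behaviour, toggled through /sys/kernel/mm/transparent_hugepage/.
 */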
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

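/* Order and number of base pages covered by one PMD-mapped huge page. */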
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

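/*
 * True when THP may back this VMA: either THP is enabled system-wide,
 * or it is enabled for madvise()d regions and the VMA is marked
 * VM_HUGEPAGE.  VM_NOHUGEPAGE VMAs and temporary stacks never qualify.
 */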
#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze);

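/*
 * Split the huge PMD mapping at @__address back into normal page-table
 * entries; a no-op unless the PMD is actually huge (trans-huge or devmap).
 */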
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false);			\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
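/* Number of base pages backing @page: HPAGE_PMD_NR for a THP, else 1. */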
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

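/*
 * The shared huge zero page, allocated on first use and refcounted
 * through get_huge_zero_page()/put_huge_zero_page() below.
 */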
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
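/* Stubs for !THP builds; using HPAGE_PMD_* here is a compile-time error. */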
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void put_huge_zero_page(void)
{
	BUILD_BUG();
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */