#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}
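/*
 * Illustrative note (editorial, not part of the original header): on
 * Book3S 64 the hugepd value packs a pointer to the hugepte table in the
 * high bits and the MMU page-size index, shifted left by 2, in the low
 * bits covered by HUGEPD_SHIFT_MASK. For example, assuming a page-size
 * index of 5 whose mmu_psize_to_shift() is 24 (a 16M page), the low bits
 * hold 5 << 2 = 0x14; hugepd_mmu_psize() recovers 5 and hugepd_shift()
 * below returns 24. The index-to-shift mapping comes from mmu_psize_defs[]
 * and is an assumption here, not something this header defines.
 */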

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */


static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}
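/*
 * Worked example (illustrative assumption, not from the original source):
 * if the directory entry covers a 1G region (pdshift == 30) and the huge
 * page shift is 24 (16M pages), an address 48M into that region gives
 * idx = (48M & ((1UL << 30) - 1)) >> 24 = 3, i.e. the fourth hugepte in
 * the table returned by hugepd_page().
 */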

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
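/*
 * Background note (editorial, hedged): with CONFIG_PPC_MM_SLICES the
 * address space is carved into slices that each carry a fixed page size,
 * so the out-of-line implementation in arch/powerpc/mm can report whether
 * a range falls in slices reserved for huge pages. Without slices (and
 * without subpage protection) no range is hugepage-only, hence the stub
 * returning 0.
 */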

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize
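/*
 * Editorial note (assumption about the out-of-line code, not defined
 * here): the powerpc override is needed because with CONFIG_PPC_MM_SLICES
 * the page size of a mapping is a property of the address slice rather
 * than just of the hstate, so the generic mm/hugetlb.c answer would be
 * wrong for such configurations.
 */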

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
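/*
 * Worked example (illustrative): for a 16M hstate, huge_page_mask(h) is
 * ~(16M - 1), so "len & ~huge_page_mask(h)" is non-zero exactly when len
 * is not a multiple of 16M. A request of addr = 0x11000000 (16M aligned)
 * with len = 0x01800000 (24M) therefore fails with -EINVAL, while
 * len = 0x02000000 (32M) is accepted.
 */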

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}


static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
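/*
 * Editorial note (hedged description of pte_update(), which lives in the
 * pgtable headers, not here): passing ~0UL as the clear mask atomically
 * zeroes every bit of the PTE and returns the previous value, which is
 * wrapped back into a pte_t. On 64-bit the trailing argument flags the
 * entry as a huge page so that any hash/TLB invalidation accounts for the
 * larger mapping; the 32-bit variant takes no such flag.
 */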

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry.  Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */


/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
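/*
 * Editorial note (hedged; describes the out-of-line implementation, not
 * this header): gigantic pages are too large to come from the buddy
 * allocator at runtime, so on FSL Book3E reserve_hugetlb_gpages() is
 * expected to scan the early command line for hugepage options and carve
 * the pages out of memblock during boot. Details vary by kernel version.
 */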
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */
Gerald Schaefer6d779072008-04-28 02:13:27 -0700209#endif /* _ASM_POWERPC_HUGETLB_H */