#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/* We have only four bits to encode the MMU page size. */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
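
/*
 * Illustrative sketch of the layout the three helpers above assume
 * (an assumption, not a spec): the hugepd word packs the pte-page
 * pointer and the MMU page size index together, roughly
 *
 *	hpd.pd = (unsigned long)ptep | (psize << 2);
 *
 * hugepd_page() masks off the low HUGEPD_SHIFT_MASK bits to recover
 * the pointer; hugepd_mmu_psize() shifts those same bits back down to
 * recover psize, which the BUILD_BUG_ON above caps at four bits.
 */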

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}
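
/*
 * Sketch of the non-Book3S encoding assumed above (hedged, not a
 * spec): the low HUGEPD_SHIFT_MASK bits hold the huge page shift
 * directly, and the pointer is stored with PD_HUGE cleared to mark
 * the entry as a hugepd; kernel pointers have that bit set on these
 * configurations, so hugepd_page() simply ORs it back in.
 */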

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical, so for that case idx = 0.
	 */
	unsigned long idx = 0;
	pte_t *dir = hugepd_page(*hpdp);

#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}
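
/*
 * Worked example with made-up numbers: for 16M pages (shift 24) hung
 * off a directory level with pdshift 28, each hugepd covers 2^28
 * bytes and holds 2^(28 - 24) = 16 huge ptes, so idx is simply bits
 * 24..27 of addr.
 */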

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c
 * needs to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
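
/*
 * For example, with a 16M hstate huge_page_mask(h) is ~0xffffffUL,
 * so any addr or len that is not 16M-aligned is rejected with
 * -EINVAL.
 */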

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
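
/*
 * Note on the arguments above (based on the pte_update() signatures
 * assumed here): the ~0UL mask clears every pte bit, the trailing 1
 * in the 64-bit call flags the pte as huge so the flush logic uses
 * the right page size, and the trailing 0 in the 32-bit call is an
 * empty "set" mask.
 */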

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will
	 * write a TLB entry.  Without this, platforms that don't do a
	 * write of the TLB entry in the TLB miss handler asm will fault
	 * ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif
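
/*
 * Usage sketch (illustrative, not a fixed call site): an FSL Book3E
 * platform would call reserve_hugetlb_gpages() from its early setup
 * path, while memblock is still the active allocator, so the gigantic
 * pages are carved out before the buddy allocator takes over.
 */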

#endif /* _ASM_POWERPC_HUGETLB_H */