#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
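
/*
 * Worked example of the decode above (illustrative value; the real
 * index-to-size assignment lives in mmu_psize_defs): if bits 2-5 of
 * the hugepd held 0x7, i.e. (hpd_val(hpd) & HUGEPD_SHIFT_MASK) were
 * 0x1c, then hugepd_mmu_psize() would return 7 and hugepd_shift()
 * would return the page shift mmu_psize_defs[7] records for the
 * active MMU.
 */
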
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
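
/*
 * Worked example for the 8xx branch above (mask values assumed from
 * the 8xx PTE definitions, where _PMD_PAGE_512K is 0x4 and
 * _PMD_PAGE_8M is 0xc): (0x4 >> 1) + 17 = 19, i.e. a 512K (2^19)
 * page, and (0xc >> 1) + 17 = 23, i.e. an 8M (2^23) page.
 */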

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. They are all identical, so just use
	 * the first one; for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
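
/*
 * Index arithmetic sketch (illustrative numbers, not tied to any one
 * platform geometry): a hugepd at pdshift covers 2^pdshift bytes and
 * holds 2^(pdshift - hugepd_shift) huge PTEs. With pdshift = 28 and a
 * 24-bit (16M) huge page, bits 24-27 of addr select one of 16 entries;
 * e.g. addr = 0xd2340000 gives idx = (addr & 0xfffffff) >> 24 = 2.
 */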

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage-size-
 * aligned regions are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
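
/*
 * Example of the check above (illustrative): for a 16M hstate,
 * huge_page_mask(h) is ~0xffffff, so any addr or len with nonzero
 * low 24 bits (say len = 0x1800000, i.e. 24M) fails with -EINVAL,
 * while addr = 0x11000000 and len = 0x2000000 (32M) pass.
 */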

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
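
/*
 * Reading of the pte_update() calls above (argument meaning assumed
 * from the respective pgtable headers): the ~0UL clear mask drops
 * every bit of the old PTE and the 0 set mask adds none back, so the
 * entry is cleared and its previous value returned; the trailing 1 in
 * the 64-bit variant marks this as a huge-page update.
 */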

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call to update_mmu_cache(), which will
	 * write a TLB entry. Without this, platforms that don't write the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
    defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */