/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

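/*
 * Recover the MMU page-size index from the hugepd; per the
 * BUILD_BUG_ON above it fits in four bits, kept shifted left by two
 * within HUGEPD_SHIFT_MASK.
 */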
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

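/* Translate the stored page-size index into its page shift. */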
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
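
/*
 * Radix needs an explicit flush here; on hash, TLB invalidation is
 * expected to be driven by the PTE update paths, so this is a no-op.
 */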
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

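/* As above, but flush only the current CPU's TLB. */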
static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
#else

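/*
 * Here the hugepd carries the hugepte table address together with
 * size/flag bits, so those bits are masked off (and, outside 8xx,
 * the PD_HUGE marker set) to recover the pointer.
 */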
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

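/*
 * 8xx derives the shift from the PMD page-size bits; the other
 * platforms store the shift value directly in HUGEPD_SHIFT_MASK.
 */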
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

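/*
 * Locate the hugepte entry covering @addr within the directory
 * described by @hpd; @pdshift is the address span covered by one
 * directory slot.
 */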
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical. So for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

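/*
 * Walk the page tables for @addr and report the page-size shift of
 * the mapping through @shift.
 */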
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
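
/* On 8xx an ordinary per-page TLB flush suffices for huge pages. */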
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage-size-
 * aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

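/* Huge PTEs are installed through the regular set_pte_at() path. */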
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

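/*
 * Atomically clear every PTE bit (hence the ~0UL mask) and return
 * the old value; note pte_update() takes different arguments on
 * 32-bit and 64-bit builds.
 */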
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

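/* Clear the PTE, then flush the corresponding TLB entry. */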
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call to update_mmu_cache, which will
	 * write a TLB entry.  Without this, platforms that don't write
	 * the TLB entry in the TLB miss handler asm will fault ad
	 * infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
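/*
 * Stubs so that callers compile unchanged when CONFIG_HUGETLB_PAGE
 * is disabled.
 */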
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */