#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

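/*
 * A hugepd_t is a directory entry that refers to a table of huge PTEs
 * rather than to a normal lower-level page table.  The page-size shift
 * is packed into the low HUGEPD_SHIFT_MASK bits of the same word as
 * the table pointer: hugepd_page() masks the shift bits off (ORing
 * PD_HUGE back in to recover the table address), and hugepd_shift()
 * extracts the shift.
 */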
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}

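/*
 * huge_pte_offset_and_shift() (implemented out of line) looks up the
 * PTE slot mapping @addr and reports the page-size shift.  A minimal
 * usage sketch, assuming a hugetlb mapping covers @addr and the usual
 * page-table locking is held:
 *
 *	unsigned shift;
 *	pte_t *ptep = huge_pte_offset_and_shift(mm, addr, &shift);
 *
 *	if (ptep && !huge_pte_none(huge_ptep_get(ptep)))
 *		size = 1UL << shift;	(size of the backing hugepage)
 */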
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

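/*
 * With MM slices or subpage protection, parts of the address space are
 * dedicated to hugepages and generic code must be able to ask about
 * them; on everything else no such ranges exist and the stub says so.
 */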
#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

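/*
 * book3e_hugetlb_preload() writes a TLB entry for a just-installed
 * huge PTE.  Book3E platforms whose TLB miss handlers do not install
 * hugepage entries themselves rely on this preload to make progress;
 * see HUGETLB_NEED_PRELOAD below.
 */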
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

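/*
 * Free the page-table pages left behind after a hugepage range has
 * been unmapped.  floor and ceiling bound how far the frees may
 * extend, as with the generic free_pgd_range().
 */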
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}


static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

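/*
 * Both pte_update() forms below atomically clear every bit of the PTE
 * and return the old value; only the signatures differ.  The trailing
 * 1 in the 64-bit call is the "huge" flag, so TLB-flush bookkeeping
 * treats the entry as a hugepage.
 */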
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

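/*
 * Re-validate permissions on a hugepage fault (accessed/dirty/write).
 * A nonzero return tells generic mm to call update_mmu_cache(), which
 * HUGETLB_NEED_PRELOAD platforms depend on to get the TLB entry
 * written at all; see the comment inside.
 */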
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry.  Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

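/*
 * powerpc needs no per-page work when pages are added to or released
 * from the hugepage pool, so the arch_*_hugepage hooks are no-ops.
 */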
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */


/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */