#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;
extern void __init reserve_hugetlb_gpages(void);

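/*
 * A hugepd_t is a "hugepage directory" entry: instead of pointing to
 * another level of page table, it points to a table of huge PTEs.  The
 * low bits of hpd.pd hold the page-size shift (HUGEPD_SHIFT_MASK), and
 * the pointer is stored with its top bit (PD_HUGE) cleared to mark the
 * entry as huge; since the table sits at a kernel virtual address,
 * that bit can simply be OR'd back in when the pointer is extracted.
 */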
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

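/*
 * Find the huge PTE for addr within the hugepte table that this hugepd
 * entry points to.  pdshift is the address shift of the directory level
 * at which the hugepd entry was found, so the bits of addr between
 * hugepd_shift() and pdshift index the table.
 */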
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On 32-bit, multiple higher-level table entries point to the
	 * same hugepte table.  They are all identical, so just use the
	 * first one (idx = 0).
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifdef CONFIG_PPC64
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}

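/*
 * Walk the page tables for addr and return the huge PTE, reporting the
 * page-size shift of the mapping via *shift.  Implemented in
 * arch/powerpc/mm/hugetlbpage.c.
 */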
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

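/*
 * With MM slices or subpage protection, parts of the address space can
 * be dedicated to hugepages and must be kept free of normal pages;
 * without either feature, no address range is hugepage-only.
 */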
#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

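/*
 * Book3E CPUs reload their TLBs in software: book3e_hugetlb_preload()
 * installs a TLB entry for a freshly faulted huge page.  The remaining
 * prototypes are the powerpc implementations of generic hugetlb hooks.
 */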
void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that a hugepage-size
 * aligned region is ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

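/* No powerpc-specific work is needed at prefault time; empty hook. */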
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

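/*
 * pte_update() has different signatures on 32-bit and 64-bit: both
 * atomically clear the PTE (the ~0UL mask clears every bit) and return
 * the old value, but the 64-bit variant also takes a "huge" flag as its
 * final argument.
 */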
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	/* The old PTE value is not needed here; just clear and flush. */
	huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

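/*
 * On powerpc, huge PTEs use the same format as normal PTEs, so the
 * remaining operations can simply defer to their normal-page
 * counterparts.
 */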
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

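/* No powerpc-specific setup or teardown is needed per hugepage. */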
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
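/*
 * "gpages" are gigantic pages reserved from memblock early in boot for
 * use as hugepages; without CONFIG_HUGETLB_PAGE there is nothing to
 * hand them to, so reserving them is an error.
 */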
static inline void reserve_hugetlb_gpages(void)
{
	pr_err("Cannot reserve gpages without hugetlb enabled\n");
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */