#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return (pte_t *)(hpd.pd & ~HUGEPD_SHIFT_MASK);
}

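/* Recover the MMU page-size index encoded in the low bits of the hugepd. */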
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

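/* Page-size shift (log2 of the huge page size) for this hugepd. */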
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

#else

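/*
 * On other subarchs the page-size shift is stored directly in the low
 * bits of the hugepd; hugepd_page() masks it off and sets PD_HUGE to
 * form the pointer to the huge PTE page.
 */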
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */

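/*
 * Find the PTE slot for @addr within the huge page table that this
 * hugepd points to.
 */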
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}

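/*
 * Look up the huge PTE mapping @addr and report the page-size shift of
 * that mapping via @shift.
 */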
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

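/* Installing a huge PTE is the same as installing an ordinary PTE here. */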
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

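/*
 * Clear the huge PTE and return its previous value.  On 64-bit the last
 * argument to pte_update() marks the entry as huge.
 */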
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

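/* Clear the huge PTE and flush the corresponding TLB entry. */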
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

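/* Thin wrappers: huge PTEs reuse the ordinary PTE helpers on powerpc. */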
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call to update_mmu_cache, which will write
	 * a TLB entry.  Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

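/* Reading a huge PTE is a plain load; no special handling is required. */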
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

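/* powerpc needs no per-page setup or teardown for huge pages. */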
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */