#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

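/* Slab cache for the hugepte tables hanging off hugepd entries. */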
extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

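/*
 * Extract the MMU page-size index from the hugepd value; four bits
 * are reserved for it (see the BUILD_BUG_ON in hugepd_page()).
 */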
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
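
/*
 * With the hash MMU, hugepage TLB invalidation happens in the hash
 * page-table paths, so an explicit flush is only needed for radix.
 */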
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
#else

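/*
 * On 8xx the hugepte table pointer lives in a PMD entry, so the PMD
 * status bits are masked off; other subarchs store the pointer with
 * the page-size shift in the low bits and the PD_HUGE bit cleared, so
 * it must be set again to recover the kernel address.
 */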
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

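/*
 * On 8xx the shift is encoded in the _PMD_PAGE_* bits (giving 19 for
 * 512K and 23 for 8M pages); elsewhere it is stored directly in the
 * low bits of the hugepd value.
 */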
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, multiple higher-level table entries point to the
	 * same hugepte. They are all identical, so just use the first one
	 * (idx = 0).
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

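/*
 * Look up the pte mapping @addr and also return the page-size shift
 * of the entry that maps it.
 */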
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

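/*
 * Preload a TLB entry for a huge page after a fault; Book3E uses
 * software-loaded TLBs, so the entry must be written explicitly.
 */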
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that regions
 * aligned to the hugepage size are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

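/*
 * Huge ptes use the normal pte format on powerpc, so most of the
 * helpers below simply wrap the corresponding generic pte operations.
 */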
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

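/*
 * pte_update() with an all-ones clear mask atomically clears the pte
 * and returns the old value; on 64-bit the final argument marks the
 * pte as huge so that the appropriate flush tracking is used.
 */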
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call to update_mmu_cache, which will
	 * write a TLB entry.  Without this, platforms that don't write the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E and 8xx platforms require special gpage handling - the
 * gpages are reserved early in the boot process by memblock instead of
 * via the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
				     defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */