#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE  8
#define H_PMD_INDEX_SIZE  5
#define H_PUD_INDEX_SIZE  5
#define H_PGD_INDEX_SIZE  12
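
/*
 * With a 64K base page (16-bit page offset), these index sizes cover
 * 16 + 8 + 5 + 5 + 12 = 46 bits, i.e. a 64TB virtual address space.
 */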

/* With 64K base page size, hugepage PTEs go at the PMD level */
#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT

#define H_PAGE_COMBO	0x00001000 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	0x00002000 /* PFN is for a single 4k page */
/*
 * We need to differentiate between an explicit huge page and a THP huge
 * page, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/*
 * Used to track subpage group validity if H_PAGE_COMBO is set.
 * This overloads H_PAGE_F_GIX and H_PAGE_F_SECOND.
 */
#define H_PAGE_COMBO_VALID	(H_PAGE_F_GIX | H_PAGE_F_SECOND)

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_F_SECOND | \
			 H_PAGE_F_GIX | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We support 16 fragments per 64K PTE page.
 */
#define PTE_FRAG_NR	16
/*
 * Each fragment is 4K: a 2K PTE page fragment plus another 2K for
 * storing the real_pte_t hash index.
 */
#define PTE_FRAG_SIZE_SHIFT  12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
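
/*
 * These numbers are consistent: with H_PTE_INDEX_SIZE = 8, a PTE page
 * holds 2^8 * sizeof(pte_t) = 256 * 8 = 2K of PTEs; the extra 2K of
 * hash index storage makes one 4K fragment (1UL << 12), so a 64K base
 * page carries 64K / 4K = 16 fragments, matching PTE_FRAG_NR.
 */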

#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;
	rpte.hidx = 0;
	if (pte_val(pte) & H_PAGE_COMBO) {
		/*
		 * Make sure we order the hidx load against the H_PAGE_COMBO
		 * check. The store side ordering is done in __hash_page_4K.
		 */
		smp_rmb();
		hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
		rpte.hidx = *hidxp;
	}
	return rpte;
}

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	if ((pte_val(rpte.pte) & H_PAGE_COMBO))
		return (rpte.hidx >> (index << 2)) & 0xf;
	return (pte_val(rpte.pte) >> H_PAGE_F_GIX_SHIFT) & 0xf;
}
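
/*
 * For a combo page, rpte.hidx packs sixteen 4-bit slot values, one per
 * 4K subpage: subpage 0 in bits 0-3, subpage 1 in bits 4-7, and so on,
 * e.g. __rpte_to_hidx(rpte, 3) evaluates to (rpte.hidx >> 12) & 0xf.
 */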

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M page
 * as well, since we want only one iteration in that case.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
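
/*
 * A sketch of how the iterator pair above is meant to be used: the body
 * runs once per valid subpage, with 'index' and 'vpn' set for each pass
 * (the actual invalidation details vary by caller):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		... invalidate the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 */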

#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
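
/*
 * Map a single 4K page into a 64K-page kernel: H_PAGE_4K_PFN marks the
 * PTE as covering one 4K HW page, and the PFN is bounds-checked against
 * PTE_RPN_MASK so the encoded real page number cannot overflow the PTE.
 */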
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
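
/*
 * With THP the PMD table is doubled: the second half holds one unsigned
 * long per PMD entry, where the pointer to the stashed pgtable_t read by
 * get_hpte_slot_array() below is kept. With PMD_INDEX_SIZE = 5 that is
 * 32 * 8 + 32 * 8 = 512 bytes rather than 256.
 */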

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
					 unsigned long addr,
					 pmd_t *pmdp,
					 unsigned long clr,
					 unsigned long set);
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hidx is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}
/*
 * The linux hugepage PMD now includes the pmd entries followed by the
 * address of the stashed pgtable_t. The stashed pgtable_t contains the
 * hpte bits: [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use
 * one byte for each HPTE entry. With a 16MB hugepage and 64K HPTEs we need
 * 256 entries and with 4K HPTEs we need 4096 entries. Both will fit in a
 * 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. This memory location
 * is also used as a normal page PTE pointer. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of that pointer is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}
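
/*
 * Example of the encoding above: mark_hpte_slot_valid(arr, 5, 0x6) stores
 * (0x6 << 1) | 0x1 = 0x0d in arr[5]; hpte_valid(arr, 5) then returns 1
 * and hpte_hash_index(arr, 5) returns 0x6 (the secondary bit plus the
 * 3-bit hidx).
 */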

/*
 * For core kernel code, by design pmd_trans_huge is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths
 * are totally separated from the core VM paths and they're differentiated
 * by VM_HUGETLB being set on vm_flags well before any pmd_trans_huge
 * could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */