/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE  8
#define H_PMD_INDEX_SIZE  10
#define H_PUD_INDEX_SIZE  7
#define H_PGD_INDEX_SIZE  8

/*
 * A 64K aligned address frees up a few of the lower bits of the RPN for us.
 * We steal those here. For more details look at pte_pfn/pfn_pte().
 */
#define H_PAGE_COMBO	_RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN	_RPAGE_RPN1 /* PFN is for a single 4k page */
#define H_PAGE_BUSY	_RPAGE_RPN44 /* software: PTE & hash are busy */
#define H_PAGE_HASHPTE	_RPAGE_RPN43 /* PTE has associated HPTE */

/*
 * We need to differentiate between explicit huge pages and THP huge
 * pages, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS	(H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We support 16 fragments per 64K PTE page.
 */
#define H_PTE_FRAG_NR	16
/*
 * We use a 2K PTE page fragment and another 2K for storing the
 * real_pte_t hash index.
 */
#define H_PTE_FRAG_SIZE_SHIFT  12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)

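/*
 * A quick worked example of the sizing above: with H_PTE_INDEX_SIZE = 8 a PTE
 * page holds 256 entries, i.e. 256 * 8 bytes = 2K of PTEs plus 2K of hash
 * slot (hidx) information, giving the 4K fragment implied by
 * H_PTE_FRAG_SIZE_SHIFT = 12; a 64K page therefore carries 64K / 4K = 16 such
 * fragments, which is where H_PTE_FRAG_NR = 16 comes from.
 */
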
#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash page table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information,
 * in order to deal with a 64K page made of 4K HW pages. Thus we override the
 * generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
	real_pte_t rpte;
	unsigned long *hidxp;

	rpte.pte = pte;

	/*
	 * Ensure that we do not read the hidx before we read the PTE: the
	 * writer side is expected to finish writing the hidx first, followed
	 * by the PTE, by using smp_wmb(); pte_set_hidx() ensures that.
	 */
	smp_rmb();

	hidxp = (unsigned long *)(ptep + offset);
	rpte.hidx = *hidxp;
	return rpte;
}

/*
 * Shift the hidx representation by one (modulo 16); i.e. hidx 0 is represented
 * as 1, 1 as 2, ..., and 0xf as 0. This convention lets us represent the
 * invalid hidx 0xf with the bit value 0x0. PTEs are anyway zeroed when
 * allocated; we don't have to zero them again and thus save on the
 * initialization.
 */
#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL) /* shift forward by one */
#define HIDX_BITS(x, index)  (x << (index << 2))
#define BITS_TO_HIDX(x, index)  ((x >> (index << 2)) & 0xfUL)
#define INVALID_RPTE_HIDX  0x0UL

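/*
 * As a small worked example of this encoding: a hash slot value of 0x3 for
 * subpage 2 is stored as HIDX_BITS(HIDX_SHIFT_BY_ONE(0x3), 2), i.e. the
 * nibble 0x4 at bit position 8; reading it back with
 * HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(hidx, 2)) yields 0x3 again, while a
 * freshly zeroed nibble decodes to the invalid slot value 0xf.
 */
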
static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
}

/*
 * Commit the hidx and return the PTE bits that need to be modified. The caller
 * is expected to modify the PTE bits accordingly and commit the PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
					 unsigned int subpg_index,
					 unsigned long hidx, int offset)
{
	unsigned long *hidxp = (unsigned long *)(ptep + offset);

	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);

	/*
	 * Anyone reading the PTE must ensure the hidx bits are read only after
	 * reading the PTE, by using the read-side barrier smp_rmb();
	 * __real_pte() can be used for that.
	 */
	smp_wmb();

	/* No PTE bits to be modified, return 0x0UL */
	return 0x0UL;
}

#define __rpte_to_pte(r)	((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for
 * a 16M page as well, since we want only one iteration.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)

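/*
 * A minimal usage sketch of the iterator pair above (the shape used by the
 * hash flush paths; the hpt_hash() arguments shown are illustrative):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		unsigned long hash = hpt_hash(vpn, shift, ssize);
 *		unsigned long hidx = __rpte_to_hidx(rpte, index);
 *		... locate and invalidate the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 */
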
#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & H_PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
			   unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
				     unsigned long pfn, pgprot_t prot)
{
	if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
		WARN(1, "remap_4k_pfn called with wrong pfn value\n");
		return -EINVAL;
	}
	return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
			       __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) +	\
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
#else
#define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
#endif
#define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

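/*
 * As a rough worked example of the sizes above (8-byte table entries
 * assumed): with the extra tracking half present, H_PMD_TABLE_SIZE is
 * 2^10 * 8 bytes of pmd_t plus the same again for the second half, i.e. 16K
 * (8K otherwise); H_PGD_TABLE_SIZE is 2^8 * 8 bytes = 2K.
 */
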
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
	/*
	 * The hpte hash index is stored in the pgtable whose address is in the
	 * second half of the PMD.
	 *
	 * Order this load with the test for pmd_trans_huge in the caller.
	 */
	smp_rmb();
	return *(char **)(pmdp + PTRS_PER_PMD);
}

/*
 * The linux hugepage PMD now includes the pmd entries followed by the address
 * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits:
 * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use one byte for
 * each HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries and
 * with 4K HPTEs we need 4096 entries. Both will fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. This memory location
 * is also used for normal page PTE pointers, so if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of those is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
	return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
					   int index)
{
	return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
					unsigned int index, unsigned int hidx)
{
	hpte_slot_array[index] = (hidx << 1) | 0x1;
}

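/*
 * A short worked example of the byte layout handled by the helpers above:
 * mark_hpte_slot_valid(array, i, 0xb) stores (0xb << 1) | 0x1 = 0x17, i.e.
 * valid = 1, hidx = 3 (the low three bits of 0xb) and secondary = 1 (the top
 * bit of 0xb); hpte_valid(array, i) then returns 1 and
 * hpte_hash_index(array, i) returns 0xb again.
 */
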
/*
 * For core kernel code, by design pmd_trans_huge is never run on any hugetlbfs
 * page. The hugetlbfs page table walking and mangling paths are totally
 * separated from the core VM paths and they're differentiated by
 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in such a case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
	return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
		  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
					       unsigned long addr, pmd_t *pmdp,
					       unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
				       unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					     pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
					   unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */