#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS
#define H_PAGE_F_GIX_SHIFT	56
#define H_PAGE_BUSY		_RPAGE_RSV1 /* software: PTE & hash are busy */
#define H_PAGE_F_SECOND		_RPAGE_RSV2	/* HPTE is in 2ndary HPTEG */
#define H_PAGE_F_GIX		(_RPAGE_RSV3 | _RPAGE_RSV4 | _RPAGE_RPN44)
#define H_PAGE_HASHPTE		_RPAGE_RPN43	/* PTE has associated HPTE */
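
/*
 * Illustrative sketch (not part of this header): H_PAGE_F_SECOND and
 * H_PAGE_F_GIX together record which HPTE slot backs a PTE. Assuming
 * the three F_GIX bits shift down at H_PAGE_F_GIX_SHIFT, as the
 * per-page-size headers use them, a hypothetical helper could recover
 * the 4-bit slot nibble like this:
 *
 *	static inline unsigned long pte_hpte_slot(unsigned long pte)
 *	{
 *		unsigned long hidx;
 *
 *		hidx = (pte & H_PAGE_F_GIX) >> H_PAGE_F_GIX_SHIFT;
 *		if (pte & H_PAGE_F_SECOND)
 *			hidx |= 0x8;	// secondary group, see _PTEIDX_SECONDARY below
 *		return hidx;
 *	}
 */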

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
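
/*
 * Worked example (illustrative): with the 4K hash layout of this era
 * (H_PTE_INDEX_SIZE = 9, H_PMD_INDEX_SIZE = 7, H_PUD_INDEX_SIZE = 9,
 * H_PGD_INDEX_SIZE = 9 -- values assumed from hash-4k.h -- and a
 * 12-bit PAGE_SHIFT), H_PGTABLE_EADDR_SIZE is 9 + 7 + 9 + 9 + 12 = 46,
 * so H_PGTABLE_RANGE covers 2^46 bytes, i.e. 64TB of effective
 * address space.
 */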

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
/*
 * Only with hash 64K pages do we need to use the second half of the
 * PMD page table to store a pointer to the deposited pgtable_t.
 */
#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
#else
#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
#endif
/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START	ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region. It
 * occupies half of it on hash CPUs and a quarter of it on Book3E,
 * where a quarter is kept for the virtual memmap.
 */
#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		(H_KERN_VIRT_SIZE >> 1)
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)
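
/*
 * Worked example (illustrative): H_KERN_VIRT_SIZE is 2^44 bytes (16TB),
 * so the vmalloc space is 2^43 bytes (8TB) and, in a function context,
 * these sanity checks would hold:
 *
 *	BUILD_BUG_ON(H_VMALLOC_SIZE != ASM_CONST(0x0000080000000000));
 *	BUILD_BUG_ON(H_VMALLOC_END != ASM_CONST(0xD000080000000000));
 */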

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL) /* Server only */
#define USER_REGION_ID		(0UL)
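
/*
 * Illustrative sketch: REGION_ID() simply extracts the top nibble of
 * an effective address, so (assuming the usual 0xC... PAGE_OFFSET):
 *
 *	REGION_ID(0xD000000000001000UL) == 0xD == VMALLOC_REGION_ID
 *	REGION_ID(0xC000000000000000UL) == 0xC == KERNEL_REGION_ID
 *	REGION_ID(0x0000000010000000UL) == 0x0 == USER_REGION_ID
 */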

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
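
/*
 * Example (illustrative): a slot nibble of 0xa decomposes as
 *
 *	0xa & _PTEIDX_SECONDARY	-> 0x8	(HPTE lives in the secondary HPTEG)
 *	0xa & _PTEIDX_GROUP_IX	-> 0x2	(slot 2 within that group)
 */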

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
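
/*
 * Illustrative sketch (hypothetical values): a sane PMD entry points to
 * a PTE table aligned to PTE_TABLE_SIZE, so any of the low bits being
 * set marks it bad:
 *
 *	hash__pmd_bad(__pmd(0xc000000001230000UL));	// 0 if the table size is <= 64K
 *	hash__pmd_bad(__pmd(0xc000000001230008UL));	// non-zero: low bits set
 */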
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	/*
	 * Spin while H_PAGE_BUSY is set, then atomically clear the 'clr'
	 * bits and set the 'set' bits. PTEs are stored big-endian, hence
	 * the cpu_to_be64() on the operands.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
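
/*
 * Usage sketch (illustrative, not from this file): a write-protect
 * helper could clear _PAGE_WRITE atomically and rely on
 * hash__pte_update() to flush any stale HPTE:
 *
 *	static inline void hash__ptep_wrprotect(struct mm_struct *mm,
 *						unsigned long addr, pte_t *ptep)
 *	{
 *		hash__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 *	}
 */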

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	/* Wait out H_PAGE_BUSY, then OR the new bits into the PTE. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
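
/*
 * In effect the asm above implements this C loop (illustrative only;
 * the real code needs ldarx/stdcx. for the atomicity guarantee):
 *
 *	do {
 *		old = *ptep;		// ldarx
 *	} while (old & cpu_to_be64(H_PAGE_BUSY));
 *	*ptep = old | val;		// stdcx., retried until it succeeds
 */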

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}
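
/*
 * Example (illustrative, assuming H_PAGE_HASHPTE is part of
 * _PAGE_HPTEFLAGS as the per-page-size headers define it): two PTEs
 * that differ only in MMU bookkeeping bits still compare equal:
 *
 *	pte_t a = __pte(0x8000000001230005UL);	// hypothetical value
 *	pte_t b = __pte(0x8000000001230005UL | H_PAGE_HASHPTE);
 *	hash__pte_same(a, b);			// 1
 */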

/*
 * This low level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors. It's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Just store the PTE normally. That covers all the 64-bit
	 * cases, and the 32-bit non-hash case with 32-bit PTEs.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
				 unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */