/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_

#include <asm-generic/5level-fixup.h>

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
#endif

/*
 * Common bits between the hash and radix page tables
 */
#define _PAGE_BIT_SWAP_TYPE	0

#define _PAGE_NA		0
#define _PAGE_RO		0
#define _PAGE_USER		0

#define _PAGE_EXEC		0x00001 /* execute permission */
#define _PAGE_WRITE		0x00002 /* write access allowed */
#define _PAGE_READ		0x00004	/* read access allowed */
#define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
#define _PAGE_SAO		0x00010 /* Strong access order */
#define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
#define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
#define _PAGE_DIRTY		0x00080 /* C: page changed */
#define _PAGE_ACCESSED		0x00100 /* R: page referenced */
/*
 * Software bits
 */
#define _RPAGE_SW0		0x2000000000000000UL
#define _RPAGE_SW1		0x00800
#define _RPAGE_SW2		0x00400
#define _RPAGE_SW3		0x00200
#define _RPAGE_RSV1		0x1000000000000000UL
#define _RPAGE_RSV2		0x0800000000000000UL
#define _RPAGE_RSV3		0x0400000000000000UL
#define _RPAGE_RSV4		0x0200000000000000UL
#define _RPAGE_RSV5		0x00040UL

#define _PAGE_PTE		0x4000000000000000UL	/* distinguishes PTEs from pointers */
#define _PAGE_PRESENT		0x8000000000000000UL	/* pte contains a translation */

/*
 * Top and bottom bits of RPN which can be used by hash
 * translation mode, because we expect them to be zero
 * otherwise.
 */
#define _RPAGE_RPN0		0x01000
#define _RPAGE_RPN1		0x02000
#define _RPAGE_RPN44		0x0100000000000000UL
#define _RPAGE_RPN43		0x0080000000000000UL
#define _RPAGE_RPN42		0x0040000000000000UL
#define _RPAGE_RPN41		0x0020000000000000UL

/* Max physical address bit as per radix table */
#define _RPAGE_PA_MAX		57

#ifdef CONFIG_PPC_MEM_KEYS
#ifdef CONFIG_PPC_64K_PAGES
#define H_PTE_PKEY_BIT0	_RPAGE_RSV1
#define H_PTE_PKEY_BIT1	_RPAGE_RSV2
#else /* CONFIG_PPC_64K_PAGES */
#define H_PTE_PKEY_BIT0	0 /* _RPAGE_RSV1 is not available */
#define H_PTE_PKEY_BIT1	0 /* _RPAGE_RSV2 is not available */
#endif /* CONFIG_PPC_64K_PAGES */
#define H_PTE_PKEY_BIT2	_RPAGE_RSV3
#define H_PTE_PKEY_BIT3	_RPAGE_RSV4
#define H_PTE_PKEY_BIT4	_RPAGE_RSV5
#else /* CONFIG_PPC_MEM_KEYS */
#define H_PTE_PKEY_BIT0	0
#define H_PTE_PKEY_BIT1	0
#define H_PTE_PKEY_BIT2	0
#define H_PTE_PKEY_BIT3	0
#define H_PTE_PKEY_BIT4	0
#endif /* CONFIG_PPC_MEM_KEYS */

/*
 * Max physical address bit we will use for now.
 *
 * This is mostly a hardware limitation; for now POWER9 has
 * a 51-bit limit.
 *
 * This is different from the number of physical bits required to address
 * the last byte of memory. That is defined by MAX_PHYSMEM_BITS.
 * MAX_PHYSMEM_BITS is a Linux limitation imposed by the maximum
 * number of sections we can support (SECTIONS_SHIFT).
 *
 * This is also different from the radix page table limitation above and
 * should always be less than that. The limit is chosen so that we can
 * overload the bits between _RPAGE_PA_MAX and _PAGE_PA_MAX for
 * hash-specific Linux page table bits.
 *
 * In order to be compatible with future hardware generations we keep
 * some headroom and limit this for now to 53.
 */
#define _PAGE_PA_MAX		53
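/*
 * Illustrative arithmetic: with _RPAGE_PA_MAX = 57 and _PAGE_PA_MAX = 53,
 * real-address bits 53..56 (_RPAGE_RPN41.._RPAGE_RPN44 above) are never
 * used for the physical address, which is what lets the hash MMU overload
 * them for its own PTE flags.
 */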

#define _PAGE_SOFT_DIRTY	_RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL		_RPAGE_SW2 /* software: special page */
#define _PAGE_DEVMAP		_RPAGE_SW1 /* software: ZONE_DEVICE page */
#define __HAVE_ARCH_PTE_DEVMAP

/*
 * Drivers request cache-inhibited pte mappings using _PAGE_NO_CACHE.
 * Instead of fixing all of them, add an alternate define which
 * maps to a CI pte mapping.
 */
#define _PAGE_NO_CACHE		_PAGE_TOLERANT
/*
 * We support _RPAGE_PA_MAX bits of real address in the pte. On the Linux
 * side we are limited by _PAGE_PA_MAX. Clear everything above _PAGE_PA_MAX
 * and everything below PAGE_SHIFT.
 */
#define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
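/*
 * Example (illustrative sketch): pfn_pte() further down packs a page frame
 * number into the RPN field as
 *
 *	pte_val = ((pte_basic_t)pfn << PAGE_SHIFT) & PTE_RPN_MASK;
 *
 * keeping only bits PAGE_SHIFT.._PAGE_PA_MAX-1 and leaving the low
 * software/status bits and the high reserved bits clear.
 */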
/*
 * Set of bits not changed in pmd_modify. Even though we have hash specific bits
 * in here, on radix we expect them to be zero.
 */
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
			 _PAGE_SOFT_DIRTY)
/*
 * user access blocked by key
 */
#define _PAGE_KERNEL_RW		(_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO		(_PAGE_PRIVILEGED | _PAGE_READ)
#define _PAGE_KERNEL_RWX	(_PAGE_PRIVILEGED | _PAGE_DIRTY | \
				 _PAGE_RW | _PAGE_EXEC)
/*
 * No page size encoding in the linux PTE
 */
#define _PAGE_PSIZE		0
/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE |	\
			 _PAGE_SOFT_DIRTY)

#define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
/*
 * Mask of bits returned by pte_pgprot()
 */
#define PAGE_PROT_BITS  (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
			 _PAGE_SOFT_DIRTY | H_PTE_PKEY)
/*
 * We define 2 sets of base prot bits, one for basic pages (i.e.,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
#define _PAGE_BASE	(_PAGE_BASE_NC)

/* Permission masks used to generate the __P and __S tables,
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now (we could make write-only
 * pages on BookE but we don't bother for now). Execute permission control is
 * possible on platforms that define _PAGE_EXEC.
 *
 * Note: due to the way the vm flags are laid out, the bits are XWR.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
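/*
 * Example (illustrative): a private PROT_READ|PROT_WRITE mapping resolves
 * to __P011 == PAGE_COPY, so write faults go through copy-on-write, while
 * the shared equivalent resolves to __S011 == PAGE_SHARED, which keeps
 * _PAGE_WRITE set in the PTE.
 */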

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_TOLERANT)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
#define PAGE_AGP		(PAGE_KERNEL_NC)

#ifndef __ASSEMBLY__
/*
 * page table defines
 */
extern unsigned long __pte_index_size;
extern unsigned long __pmd_index_size;
extern unsigned long __pud_index_size;
extern unsigned long __pgd_index_size;
extern unsigned long __pmd_cache_index;
extern unsigned long __pud_cache_index;
#define PTE_INDEX_SIZE  __pte_index_size
#define PMD_INDEX_SIZE  __pmd_index_size
#define PUD_INDEX_SIZE  __pud_index_size
#define PGD_INDEX_SIZE  __pgd_index_size
#define PMD_CACHE_INDEX __pmd_cache_index
#define PUD_CACHE_INDEX __pud_cache_index
/*
 * Because of the use of pte fragments and THP, the page table sizes are
 * not always derived from the index sizes above.
 */
extern unsigned long __pte_table_size;
extern unsigned long __pmd_table_size;
extern unsigned long __pud_table_size;
extern unsigned long __pgd_table_size;
#define PTE_TABLE_SIZE	__pte_table_size
#define PMD_TABLE_SIZE	__pmd_table_size
#define PUD_TABLE_SIZE	__pud_table_size
#define PGD_TABLE_SIZE	__pgd_table_size

extern unsigned long __pmd_val_bits;
extern unsigned long __pud_val_bits;
extern unsigned long __pgd_val_bits;
#define PMD_VAL_BITS	__pmd_val_bits
#define PUD_VAL_BITS	__pud_val_bits
#define PGD_VAL_BITS	__pgd_val_bits

extern unsigned long __pte_frag_nr;
#define PTE_FRAG_NR __pte_frag_nr
extern unsigned long __pte_frag_size_shift;
#define PTE_FRAG_SIZE_SHIFT __pte_frag_size_shift
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PUD_SHIFT determines what a third-level page table entry can map */
#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
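/*
 * Example (illustrative): a virtual address is decoded top-down as
 *
 *	pgd index = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)
 *	pud index = (addr >> PUD_SHIFT)   & (PTRS_PER_PUD - 1)
 *	pmd index = (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1)
 *	pte index = (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1)
 *
 * which is exactly what the pgd_index()/pud_index()/pmd_index()/pte_index()
 * helpers further down compute.
 */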

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS		0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0xc0000000000000ffUL

extern unsigned long __vmalloc_start;
extern unsigned long __vmalloc_end;
#define VMALLOC_START	__vmalloc_start
#define VMALLOC_END	__vmalloc_end

extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
extern unsigned long __kernel_io_start;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_VIRT_SIZE  __kernel_virt_size
#define KERN_IO_START  __kernel_io_start
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
#endif /* __ASSEMBLY__ */

#include <asm/book3s/64/hash.h>
#include <asm/book3s/64/radix.h>

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/pgtable-64k.h>
#else
#include <asm/book3s/64/pgtable-4k.h>
#endif

#include <asm/barrier.h>
/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space.
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors; it's
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages.
 */
#ifndef __real_pte

#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
	do {							         \
		index = 0;					         \
		shift = mmu_psize_defs[psize].shift;		         \

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
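/*
 * Illustrative use of the iterator above (sketch only; the surrounding
 * variables are hypothetical):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		hash = hpt_hash(va, shift, ssize);
 *		...
 *	} pte_iterate_hashed_end();
 */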

static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set, int huge)
{
	if (radix_enabled())
		return radix__pte_update(mm, addr, ptep, clr, set, huge);
	return hash__pte_update(mm, addr, ptep, clr, set, huge);
}
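/*
 * Example (illustrative): clearing the write bit on a regular pte is
 *
 *	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 *
 * with the last argument non-zero for hugepage ptes, as done by
 * ptep_set_wrprotect()/huge_ptep_set_wrprotect() below.
 */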
/*
 * For hash, even if we have _PAGE_ACCESSED = 0, we do a pte_update.
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally.
 * For radix: H_PAGE_HASHPTE should be zero. Hence we can use the same
 * function for both hash and radix.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)	\
({								\
	int __r;						\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;							\
})

static inline int __pte_write(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}

#ifdef CONFIG_NUMA_BALANCING
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	/*
	 * Saved write ptes are prot none ptes that don't have the
	 * privileged bit set. We mark prot none as one which has
	 * present and privileged bits set and RWX cleared. To mark
	 * protnone which used to have _PAGE_WRITE set we clear
	 * the privileged bit.
	 */
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED));
}
#else
#define pte_savedwrite pte_savedwrite
static inline bool pte_savedwrite(pte_t pte)
{
	return false;
}
#endif

static inline int pte_write(pte_t pte)
{
	return __pte_write(pte) || pte_savedwrite(pte);
}

static inline int pte_read(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_READ));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/*
	 * We should not find protnone for hugetlb, but this completes the
	 * interface.
	 */
	if (__pte_write(*ptep))
		pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1);
	else if (unlikely(pte_savedwrite(*ptep)))
		pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full && radix_enabled()) {
		/*
		 * Let's skip the DD1 style pte update here. We know that
		 * this is a full mm pte clear and hence can be sure there is
		 * no parallel set_pte.
		 */
		return radix__ptep_get_and_clear_full(mm, addr, ptep, full);
	}
	return ptep_get_and_clear(mm, addr, ptep);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

static inline int pte_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY));
}

static inline int pte_young(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_ACCESSED));
}

static inline int pte_special(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
}

static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SOFT_DIRTY));
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE | _PAGE_RWX)) ==
		cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
}

#define pte_mk_savedwrite pte_mk_savedwrite
static inline pte_t pte_mk_savedwrite(pte_t pte)
{
	/*
	 * Used by the AutoNUMA subsystem to preserve the write bit
	 * while marking the pte PROT_NONE. Only allow this on a
	 * PROT_NONE pte.
	 */
	VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
		  cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
	return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
}

#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	/*
	 * Used by the KSM subsystem to make a protnone pte readonly.
	 */
	VM_BUG_ON(!pte_protnone(pte));
	return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
}
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
	VM_WARN_ON(1);
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
}

#ifdef CONFIG_PPC_MEM_KEYS
extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
#else
static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
	return true;
}
#endif /* CONFIG_PPC_MEM_KEYS */

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	unsigned long pteval = pte_val(pte);
	/* Also check for pte_user */
	unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
	/*
	 * _PAGE_READ is needed for any access and will be
	 * cleared for PROT_NONE
	 */
	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;

	if (write)
		need_pte_bits |= _PAGE_WRITE;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return false;

	if ((pteval & clear_pte_bits) == clear_pte_bits)
		return false;

	return arch_pte_access_permitted(pte_val(pte), write, 0);
}
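/*
 * Example (illustrative): a PROT_NONE pte (_PAGE_PRESENT | _PAGE_PTE |
 * _PAGE_PRIVILEGED with RWX clear) fails pte_access_permitted() for both
 * reads and writes: _PAGE_READ is missing and _PAGE_PRIVILEGED is set.
 */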

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte((((pte_basic_t)(pfn) << PAGE_SHIFT) & PTE_RPN_MASK) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	if (unlikely(pte_savedwrite(pte)))
		return pte_clear_savedwrite(pte);
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL | _PAGE_DEVMAP);
}

/*
 * This is potentially called with a pmd as the argument, in which case it's not
 * safe to check _PAGE_DEVMAP unless we also confirm that _PAGE_PTE is set.
 * That's because the bit we use for _PAGE_DEVMAP is not reserved for software
 * use in page directory entries (i.e. non-ptes).
 */
static inline int pte_devmap(pte_t pte)
{
	u64 mask = cpu_to_be64(_PAGE_DEVMAP | _PAGE_PTE);

	return (pte_raw(pte) & mask) == mask;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* FIXME!! check whether this needs to be a conditional */
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

static inline bool pte_user(pte_t pte)
{
	return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
}

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	} while (0)
/*
 * on pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap, and convert that to a pte to
 * find a matching pte in the Linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
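/*
 * Example (illustrative round trip): the swap type lands in the low
 * software bits and the offset reuses the RPN field, so
 *
 *	swp_entry_t ent = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(ent);
 *
 * gives back the same type/offset via __swp_type()/__swp_offset() on
 * __pte_to_swp_entry(pte).
 */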

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This checks the _PAGE_RWX and _PAGE_PRESENT bits
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This checks for access to privileged space
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
/*
 * Generic functions with hash/radix callbacks
 */

static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	if (radix_enabled())
		return radix__ptep_set_access_flags(mm, ptep, entry, address);
	return hash__ptep_set_access_flags(ptep, entry);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	if (radix_enabled())
		return radix__pte_same(pte_a, pte_b);
	return hash__pte_same(pte_a, pte_b);
}

static inline int pte_none(pte_t pte)
{
	if (radix_enabled())
		return radix__pte_none(pte);
	return hash__pte_none(pte);
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	if (radix_enabled())
		return radix__set_pte_at(mm, addr, ptep, pte, percpu);
	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
}

#define _PAGE_CACHE_CTL	(_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NON_IDEMPOTENT);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_TOLERANT);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL));
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
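/*
 * Example (illustrative): a driver asking for a write-combining mapping,
 * e.g. pgprot_writecombine(PAGE_KERNEL), ends up with the cache-control
 * field set to _PAGE_TOLERANT only.
 */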
/*
 * Check whether a pte mapping has the cache-inhibited property.
 */
static inline bool pte_ci(pte_t pte)
{
	unsigned long pte_v = pte_val(pte);

	if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
	    ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
		return true;
	return false;
}

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_raw(pmd);
}

static inline int pmd_present(pmd_t pmd)
{
	return !pmd_none(pmd);
}

static inline int pmd_bad(pmd_t pmd)
{
	if (radix_enabled())
		return radix__pmd_bad(pmd);
	return hash__pmd_bad(pmd);
}

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

static inline int pud_none(pud_t pud)
{
	return !pud_raw(pud);
}

static inline int pud_present(pud_t pud)
{
	return !pud_none(pud);
}

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte_raw(pud_raw(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud_raw(pte_raw(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))

static inline int pud_bad(pud_t pud)
{
	if (radix_enabled())
		return radix__pud_bad(pud);
	return hash__pud_bad(pud);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return pte_access_permitted(pud_pte(pud), write);
}

#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

static inline int pgd_none(pgd_t pgd)
{
	return !pgd_raw(pgd);
}

static inline int pgd_present(pgd_t pgd)
{
	return !pgd_none(pgd);
}

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte_raw(pgd_raw(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd_raw(pte_raw(pte));
}

static inline int pgd_bad(pgd_t pgd)
{
	if (radix_enabled())
		return radix__pgd_bad(pgd);
	return hash__pgd_bad(pgd);
}

#define pgd_access_permitted pgd_access_permitted
static inline bool pgd_access_permitted(pgd_t pgd, bool write)
{
	return pte_access_permitted(pgd_pte(pgd), write);
}

extern struct page *pgd_page(pgd_t pgd);

/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr)	__pa(ptr)

#define pmd_page_vaddr(pmd)	__va(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pud_page_vaddr(pud)	__va(pud_val(pud) & ~PUD_MASKED_BITS)
#define pgd_page_vaddr(pgd)	__va(pgd_val(pgd) & ~PGD_MASKED_BITS)

#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))

/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp, addr)	\
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir, addr)	\
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
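/*
 * Example (illustrative): a software walk of the kernel page tables for a
 * vmalloc address would look like
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with the pgd_none()/pud_none()/pmd_none() checks at each level omitted
 * for brevity.
 */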

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

static inline int map_kernel_page(unsigned long ea, unsigned long pa,
				  unsigned long flags)
{
	if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
		unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
		WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
		return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
	}
	return hash__map_kernel_page(ea, pa, flags);
}

static inline int __meminit vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys)
{
	if (radix_enabled())
		return radix__vmemmap_create_mapping(start, page_size, phys);
	return hash__vmemmap_create_mapping(start, page_size, phys);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static inline void vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size)
{
	if (radix_enabled())
		return radix__vmemmap_remove_mapping(start, page_size);
	return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
struct page *realmode_pfn_to_page(unsigned long pfn);

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte_raw(pmd_raw(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd_raw(pte_raw(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mk_savedwrite(pmd)	pte_pmd(pte_mk_savedwrite(pmd_pte(pmd)))
#define pmd_clear_savedwrite(pmd)	pte_pmd(pte_clear_savedwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1079 | #define pmd_write(pmd) pte_write(pmd_pte(pmd)) |
Aneesh Kumar K.V | d19469e | 2017-03-09 16:16:39 -0800 | [diff] [blame] | 1080 | #define __pmd_write(pmd) __pte_write(pmd_pte(pmd)) |
Aneesh Kumar K.V | c137a27 | 2017-02-24 14:59:21 -0800 | [diff] [blame] | 1081 | #define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1082 | |
Aneesh Kumar K.V | f72a85e | 2017-12-04 07:49:11 +0530 | [diff] [blame] | 1083 | #define pmd_access_permitted pmd_access_permitted |
| 1084 | static inline bool pmd_access_permitted(pmd_t pmd, bool write) |
| 1085 | { |
| 1086 | return pte_access_permitted(pmd_pte(pmd), write); |
| 1087 | } |
| 1088 | |
Aneesh Kumar K.V | 6a1ea36 | 2016-04-29 23:26:28 +1000 | [diff] [blame] | 1089 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
| 1090 | extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot); |
| 1091 | extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot); |
| 1092 | extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot); |
| 1093 | extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
| 1094 | pmd_t *pmdp, pmd_t pmd); |
| 1095 | extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, |
| 1096 | pmd_t *pmd); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1097 | extern int hash__has_transparent_hugepage(void); |
| 1098 | static inline int has_transparent_hugepage(void) |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1099 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1100 | if (radix_enabled()) |
| 1101 | return radix__has_transparent_hugepage(); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1102 | return hash__has_transparent_hugepage(); |
| 1103 | } |
Linus Torvalds | c04a588 | 2016-05-20 10:12:41 -0700 | [diff] [blame] | 1104 | #define has_transparent_hugepage has_transparent_hugepage |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1105 | |
| 1106 | static inline unsigned long |
| 1107 | pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, |
| 1108 | unsigned long clr, unsigned long set) |
| 1109 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1110 | if (radix_enabled()) |
| 1111 | return radix__pmd_hugepage_update(mm, addr, pmdp, clr, set); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1112 | return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set); |
| 1113 | } |
| 1114 | |
| 1115 | static inline int pmd_large(pmd_t pmd) |
| 1116 | { |
Aneesh Kumar K.V | 66c570f | 2016-07-13 15:05:22 +0530 | [diff] [blame] | 1117 | return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1118 | } |
| 1119 | |
| 1120 | static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
| 1121 | { |
| 1122 | return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT); |
| 1123 | } |
| 1124 | /*
| 1125 |  * For radix, H_PAGE_HASHPTE is always expected to be zero, so the
| 1126 |  * check below works for radix as well.
| 1127 |  */
| 1128 | static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, |
| 1129 | unsigned long addr, pmd_t *pmdp) |
| 1130 | { |
| 1131 | unsigned long old; |
| 1132 | |
Aneesh Kumar K.V | 66c570f | 2016-07-13 15:05:22 +0530 | [diff] [blame] | 1133 | if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_ACCESSED | H_PAGE_HASHPTE)) == 0) |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1134 | return 0; |
| 1135 | old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); |
| 1136 | return ((old & _PAGE_ACCESSED) != 0); |
| 1137 | } |
| 1138 | |
| 1139 | #define __HAVE_ARCH_PMDP_SET_WRPROTECT |
| 1140 | static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
| 1141 | pmd_t *pmdp) |
| 1142 | { |
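	/*
	 * Note: a saved-write entry (Autonuma PROT_NONE that remembers write
	 * permission, see pmd_savedwrite()) already has _PAGE_WRITE clear, so
	 * it is write-protected by setting _PAGE_PRIVILEGED again, turning it
	 * back into an ordinary PROT_NONE entry.
	 */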
Aneesh Kumar K.V | d19469e | 2017-03-09 16:16:39 -0800 | [diff] [blame] | 1143 | 	if (__pmd_write(*pmdp))
Aneesh Kumar K.V | 52c50ca | 2017-03-09 16:16:36 -0800 | [diff] [blame] | 1144 | pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); |
| 1145 | else if (unlikely(pmd_savedwrite(*pmdp))) |
| 1146 | pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1147 | } |
| 1148 | |
Aneesh Kumar K.V | ab62476 | 2016-04-29 23:26:31 +1000 | [diff] [blame] | 1149 | static inline int pmd_trans_huge(pmd_t pmd) |
| 1150 | { |
| 1151 | if (radix_enabled()) |
| 1152 | return radix__pmd_trans_huge(pmd); |
| 1153 | return hash__pmd_trans_huge(pmd); |
| 1154 | } |
| 1155 | |
| 1156 | #define __HAVE_ARCH_PMD_SAME |
| 1157 | static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) |
| 1158 | { |
| 1159 | if (radix_enabled()) |
| 1160 | return radix__pmd_same(pmd_a, pmd_b); |
| 1161 | return hash__pmd_same(pmd_a, pmd_b); |
| 1162 | } |
| 1163 | |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1164 | static inline pmd_t pmd_mkhuge(pmd_t pmd) |
| 1165 | { |
Aneesh Kumar K.V | ab62476 | 2016-04-29 23:26:31 +1000 | [diff] [blame] | 1166 | if (radix_enabled()) |
| 1167 | return radix__pmd_mkhuge(pmd); |
| 1168 | return hash__pmd_mkhuge(pmd); |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1169 | } |
| 1170 | |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1171 | #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS |
| 1172 | extern int pmdp_set_access_flags(struct vm_area_struct *vma, |
| 1173 | unsigned long address, pmd_t *pmdp, |
| 1174 | pmd_t entry, int dirty); |
| 1175 | |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1176 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
| 1177 | extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
| 1178 | unsigned long address, pmd_t *pmdp); |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1179 | |
| 1180 | #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1181 | static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, |
| 1182 | unsigned long addr, pmd_t *pmdp) |
| 1183 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1184 | if (radix_enabled()) |
| 1185 | return radix__pmdp_huge_get_and_clear(mm, addr, pmdp); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1186 | return hash__pmdp_huge_get_and_clear(mm, addr, pmdp); |
| 1187 | } |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1188 | |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1189 | static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, |
| 1190 | unsigned long address, pmd_t *pmdp) |
| 1191 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1192 | if (radix_enabled()) |
| 1193 | return radix__pmdp_collapse_flush(vma, address, pmdp); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1194 | return hash__pmdp_collapse_flush(vma, address, pmdp); |
| 1195 | } |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1196 | #define pmdp_collapse_flush pmdp_collapse_flush |
| 1197 | |
| 1198 | #define __HAVE_ARCH_PGTABLE_DEPOSIT |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1199 | static inline void pgtable_trans_huge_deposit(struct mm_struct *mm, |
| 1200 | pmd_t *pmdp, pgtable_t pgtable) |
| 1201 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1202 | if (radix_enabled()) |
| 1203 | return radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1204 | return hash__pgtable_trans_huge_deposit(mm, pmdp, pgtable); |
| 1205 | } |
| 1206 | |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1207 | #define __HAVE_ARCH_PGTABLE_WITHDRAW |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1208 | static inline pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, |
| 1209 | pmd_t *pmdp) |
| 1210 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1211 | if (radix_enabled()) |
| 1212 | return radix__pgtable_trans_huge_withdraw(mm, pmdp); |
Aneesh Kumar K.V | 3df33f1 | 2016-04-29 23:26:29 +1000 | [diff] [blame] | 1213 | return hash__pgtable_trans_huge_withdraw(mm, pmdp); |
| 1214 | } |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1215 | |
| 1216 | #define __HAVE_ARCH_PMDP_INVALIDATE |
Aneesh Kumar K.V | 8cc931e | 2018-01-31 16:18:02 -0800 | [diff] [blame] | 1217 | extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, |
| 1218 | pmd_t *pmdp); |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1219 | |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1220 | #define pmd_move_must_withdraw pmd_move_must_withdraw |
| 1221 | struct spinlock; |
| 1222 | static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, |
Aneesh Kumar K.V | 1dd38b6 | 2016-12-12 16:44:29 -0800 | [diff] [blame] | 1223 | struct spinlock *old_pmd_ptl, |
| 1224 | struct vm_area_struct *vma) |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1225 | { |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1226 | if (radix_enabled()) |
| 1227 | return false; |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1228 | 	/*
| 1229 | 	 * Archs like ppc64 use the deposited pgtable to store per-pmd
| 1230 | 	 * specific information, so when we move the pmd we must also
| 1231 | 	 * withdraw the pgtable and deposit it under the new pmd.
| 1232 | 	 */
| 1233 | return true; |
| 1234 | } |
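/*
 * Illustrative sketch, not part of this header: one way a caller that moves
 * a huge pmd could combine the helpers above.  Only the helpers declared in
 * this file are real; the other names are placeholders.
 *
 *	pmd_t pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmdp);
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
 *		pgtable_t pgtable;
 *
 *		pgtable = pgtable_trans_huge_withdraw(mm, old_pmdp);
 *		pgtable_trans_huge_deposit(mm, new_pmdp, pgtable);
 *	}
 *	set_pmd_at(mm, new_addr, new_pmdp, pmd);
 *
 * On hash this keeps the deposited page table attached to whichever pmd
 * currently maps the huge page; on radix pmd_move_must_withdraw() returns
 * false and the withdraw/deposit step is skipped.
 */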
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 1235 | |
| 1236 | |
| 1237 | #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit |
| 1238 | static inline bool arch_needs_pgtable_deposit(void) |
| 1239 | { |
| 1240 | if (radix_enabled()) |
| 1241 | return false; |
| 1242 | return true; |
| 1243 | } |
Aneesh Kumar K.V | fa4531f | 2017-07-27 11:54:54 +0530 | [diff] [blame] | 1244 | extern void serialize_against_pte_lookup(struct mm_struct *mm); |
Aneesh Kumar K.V | 953c66c | 2016-12-12 16:44:32 -0800 | [diff] [blame] | 1245 | |
Oliver O'Halloran | ebd3119 | 2017-06-28 11:32:34 +1000 | [diff] [blame] | 1246 | |
| 1247 | static inline pmd_t pmd_mkdevmap(pmd_t pmd) |
| 1248 | { |
| 1249 | return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP)); |
| 1250 | } |
| 1251 | |
| 1252 | static inline int pmd_devmap(pmd_t pmd) |
| 1253 | { |
| 1254 | return pte_devmap(pmd_pte(pmd)); |
| 1255 | } |
| 1256 | |
| 1257 | static inline int pud_devmap(pud_t pud) |
| 1258 | { |
| 1259 | return 0; |
| 1260 | } |
| 1261 | |
| 1262 | static inline int pgd_devmap(pgd_t pgd) |
| 1263 | { |
| 1264 | return 0; |
| 1265 | } |
Aneesh Kumar K.V | 6a1ea36 | 2016-04-29 23:26:28 +1000 | [diff] [blame] | 1266 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
Oliver O'Halloran | ebd3119 | 2017-06-28 11:32:34 +1000 | [diff] [blame] | 1267 | |
| 1268 | static inline int pud_pfn(pud_t pud)
| 1269 | { |
| 1270 | /* |
| 1271 | 	 * Currently all calls to pud_pfn() are gated behind a pud_devmap()
| 1272 | 	 * check, so this should never be used. If it grows another user we
| 1273 | 	 * want to know about it.
| 1274 | */ |
| 1275 | BUILD_BUG(); |
| 1276 | return 0; |
| 1277 | } |
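/*
 * Illustrative note: callers are expected to follow the pattern
 *
 *	if (pud_devmap(pud))
 *		pfn = pud_pfn(pud);
 *
 * and since pud_devmap() is hard-wired to return 0 here, the compiler can
 * discard the pud_pfn() call entirely, so the BUILD_BUG() above is never
 * instantiated in a successful build.
 */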
Michael Ellerman | 029d925 | 2017-07-14 16:51:23 +1000 | [diff] [blame] | 1278 | |
Aneesh Kumar K.V | 3dfcb315 | 2015-12-01 09:06:28 +0530 | [diff] [blame] | 1279 | #endif /* __ASSEMBLY__ */ |
| 1280 | #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ |