#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x). For PPC64 we support either 4K or 64K software page size.
 * When using 64K pages, however, whether we are really supporting 64K pages
 * in HW or not is irrelevant to these definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

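/*
 * Worked example (illustrative, not part of this header): with
 * CONFIG_PPC_64K_PAGES, PAGE_SHIFT is 16 and PAGE_SIZE evaluates to
 * ASM_CONST(1) << 16 == 0x10000UL (64K). ASM_CONST() exists so the same
 * constant can be used from both C (where it gains a UL suffix) and
 * assembly (where it stays a bare number). A hypothetical compile-time
 * sanity check, assuming <linux/bug.h> were included:
 */
#if 0	/* sketch only, never built */
static inline void __page_size_example(void)
{
	BUILD_BUG_ON(PAGE_SIZE != (1UL << PAGE_SHIFT));
	BUILD_BUG_ON(PAGE_SIZE & (PAGE_SIZE - 1));	/* must be a power of two */
}
#endif
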
#ifndef __ASSEMBLY__
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int HPAGE_SHIFT;
#else
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
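
/*
 * Illustrative arithmetic (the 16M figure is an assumption about one common
 * hugepage configuration, not something this header sets): with
 * HPAGE_SHIFT = 24 and a 4K base page (PAGE_SHIFT = 12):
 *
 *	HPAGE_SIZE         = 1UL << 24 = 0x1000000 (16M)
 *	HPAGE_MASK         = ~(HPAGE_SIZE - 1) = 0xffffffffff000000 (64-bit)
 *	HUGETLB_PAGE_ORDER = 24 - 12 = 12
 */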

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
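
/*
 * Sketch of the sign-extension behaviour just described (illustrative,
 * never built): with PAGE_SHIFT = 12, PAGE_MASK as an int is 0xfffff000,
 * i.e. a negative value, so widening it keeps the high bits set.
 */
#if 0
static inline void __page_mask_example(void)
{
	u64 mask = PAGE_MASK;		/* sign-extends to 0xfffffffffffff000 */
	u64 addr = 0x12345678;

	/* addr & mask == 0x12345000: only the offset-in-page bits clear */
	(void)(addr & mask);
}
#endif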

/*
 * KERNELBASE is the virtual address of the start of the kernel, it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to relate a virtual address to its physical address:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
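
/*
 * Worked example of the relations above (the numbers are an assumption for
 * illustration, not values set by this header): a static ppc32 kernel with
 * KERNELBASE = PAGE_OFFSET = 0xc0000000 and PHYSICAL_START = MEMORY_START = 0
 * satisfies KERNELBASE - PAGE_OFFSET == PHYSICAL_START - MEMORY_START == 0,
 * and both equations give the same translation:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START  = pa + 0xc0000000
 *	va = pa + KERNELBASE - PHYSICAL_START = pa + 0xc0000000
 */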

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#ifdef CONFIG_RELOCATABLE_PPC32
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See Description below for VIRT_PHYS_OFFSET */
#ifdef CONFIG_RELOCATABLE_PPC32
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif


#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif

#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
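
/*
 * Usage sketch (illustrative, never built): round-tripping a lowmem kernel
 * virtual address through its struct page. page_to_pfn() comes from
 * <asm-generic/memory_model.h>, included at the bottom of this header.
 */
#if 0
static inline int __virt_to_page_example(void *kaddr)
{
	struct page *pg = virt_to_page(kaddr);	/* __pa(kaddr) >> PAGE_SHIFT, then pfn_to_page() */
	void *base = pfn_to_kaddr(page_to_pfn(pg));

	/* base recovers kaddr rounded down to its page boundary */
	return base == (void *)((unsigned long)kaddr & PAGE_MASK);
}
#endif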

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE (RELOCATABLE_PPC32)
 *
 * With RELOCATABLE_PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE,256M) +
 *			MODULO(_stext.run,256M)
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run,256M) => ALIGN_DOWN(KERNELBASE,256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * Where:
 *	PHYSICAL_START = kernstart_addr = Physical address of _stext
 *	KERNELBASE = Compiled virtual address of _stext.
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we also need to account for the shift in the kernel virtual
 * address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 * PAGE_OFFSET). In this case, we would be mapping 0 to 0xc0000000, and
 * kernstart_addr = 64M
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be: 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * Where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base =
 *				ALIGN_DOWN(KERNELBASE,256M) +
 *				MODULO(PHYSICAL_START,256M)
 *
 * To make __va() / __pa() lighter weight, we introduce a new variable
 * virt_phys_offset, which will hold:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE,256M) -
 *			   ALIGN_DOWN(PHYSICAL_START,256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64 PAGE_OFFSET and MEMORY_START are constants so use
 * the other definitions for __va & __pa.
 */
#ifdef CONFIG_BOOKE
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
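
/*
 * Worked example of the 64-bit | / & trick above (the 0xc000000000000000
 * value for PAGE_OFFSET is the usual ppc64 layout, stated here as an
 * assumption): valid linear-map physical addresses fit in the low 60 bits,
 * so OR-ing in PAGE_OFFSET never carries and equals addition:
 *
 *	__va(0x4000000) = 0x4000000 | 0xc000000000000000
 *			= 0xc000000004000000	(same result as +)
 *	__pa(0xc000000004000000) = 0xc000000004000000 & 0x0fffffffffffffffUL
 *				 = 0x4000000
 */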

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	__ALIGN_KERNEL(addr, size)
#define _ALIGN_DOWN(addr, size)	((addr)&(~((typeof(addr))(size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)     _ALIGN_UP(addr,size)
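
/*
 * Worked example (illustrative): with a 4K alignment size,
 *
 *	_ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *	_ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 *	_ALIGN(0x1000, 0x1000)      == 0x1000	(already aligned, unchanged)
 *
 * The size argument must be a power of two for the mask arithmetic to work.
 */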

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness", use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000
#else
#define PD_HUGE 0x80000000
#endif
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 */
#define HUGEPD_SHIFT_MASK     0x3f
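
/*
 * Illustrative reading of the two masks (an interpretation for clarity,
 * not new definitions): for a 64-bit higher-level entry e,
 *
 *	e & PD_HUGE            - the top bit, the "contains hugepages" marker
 *	e & HUGEPD_SHIFT_MASK  - the low 6 bits, the encoded hugepage size
 */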

#ifndef __ASSEMBLY__

#ifdef CONFIG_STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/*
 * 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef pte_t real_pte_t;
#endif


#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
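
/*
 * Usage sketch (illustrative, never built): with CONFIG_STRICT_MM_TYPECHECKS
 * the struct wrappers above make the compiler reject accidental mixing of
 * table levels or raw integers; values must be wrapped and unwrapped
 * explicitly.
 */
#if 0
static inline void __typecheck_example(void)
{
	pte_t pte = __pte(0x1UL);		/* explicit wrap */
	unsigned long raw = pte_val(pte);	/* explicit unwrap */

	/* pmd_t pmd = pte; -- would fail to compile under strict typechecks */
	(void)raw;
}
#endif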

typedef struct { signed long pd; } hugepd_t;

#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_PPC_BOOK3S_64
static inline int hugepd_ok(hugepd_t hpd)
{
	/*
	 * hugepd pointer, bottom two bits == 00 and next 4 bits
	 * indicate size of table
	 */
	return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
}
#else
static inline int hugepd_ok(hugepd_t hpd)
{
	return (hpd.pd > 0);
}
#endif

#define is_hugepd(hpd)               (hugepd_ok(hpd))
#define pgd_huge pgd_huge
int pgd_huge(pgd_t pgd);
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep)			0
#define pgd_huge(pgd)			0
#endif /* CONFIG_HUGETLB_PAGE */
#define __hugepd(x) ((hugepd_t) { (x) })
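
/*
 * Sketch of why the non-Book3S-64 test above works (reasoning from the
 * PD_HUGE comment earlier, offered as an interpretation): pd is a *signed*
 * long, and ordinary entries hold kernel-space pointers whose top bit is
 * set, so they read as negative; an entry flagged via the top bit therefore
 * shows up as pd > 0. On Book3S-64 the check instead requires the bottom
 * two bits to be 00 and a nonzero size field:
 */
#if 0	/* illustrative value only, not a real page table entry */
static inline void __hugepd_example(void)
{
	hugepd_t hpd = __hugepd(0x24UL);	/* bottom bits 00, size field 0x24 */

	/* On Book3S-64, hugepd_ok(hpd) is true for this encoding. */
	(void)hugepd_ok(hpd);
}
#endif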

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
typedef pte_t *pgtable_t;
#else
typedef struct page *pgtable_t;
#endif

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */