#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-compat.h>
#include <asm/kdump.h>

/*
 * On regular PPC32 the page size is 4K (though 4K/16K/64K/256K pages are
 * supported on PPC44x).  On PPC64 we support either a 4K or 64K software
 * page size.  When using 64K pages, however, whether we really support
 * 64K pages in hardware or not is irrelevant to these definitions.
 */
#if defined(CONFIG_PPC_256K_PAGES)
#define PAGE_SHIFT		18
#elif defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT		16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT		14
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
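
/*
 * Illustrative values (derived from the definitions above): PAGE_SHIFT 18
 * gives PAGE_SIZE 0x40000 (256K), 16 gives 0x10000 (64K), 14 gives
 * 0x4000 (16K), and the default 12 gives 0x1000 (4K).
 */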

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA		1

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long.  So if we
 * assign PAGE_MASK to a larger type it gets sign-extended the way we
 * want (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
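
/*
 * Worked example of the above (assuming 4K pages on a 64-bit build):
 * ~((1 << 12) - 1) is the 32-bit int 0xfffff000, i.e. the negative value
 * -4096.  Assigned to an unsigned long it sign-extends to
 * 0xfffffffffffff000, exactly the mask we want.  Computed as an unsigned
 * int it would instead zero-extend to 0x00000000fffff000.
 */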

/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and from how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
 *
 * There are two ways to map a physical address to a virtual one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
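
/*
 * Worked example (illustrative values only): a typical ppc32 build has
 * KERNELBASE == PAGE_OFFSET == 0xc0000000 and
 * PHYSICAL_START == MEMORY_START == 0, so the linear-map equation holds
 * trivially (0 == 0).  A kdump kernel might instead run with
 * KERNELBASE == 0xc2000000 and PHYSICAL_START == 0x2000000 while
 * PAGE_OFFSET stays 0xc0000000 and MEMORY_START stays 0; both sides of
 * the equation are then 0x2000000.
 */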

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_RELOCATABLE)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;
#endif
#define PHYSICAL_START	kernstart_addr
#else
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_RELOCATABLE)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
#endif
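
/*
 * Example (assumed values): with 4K pages and lowmem starting at
 * MEMORY_START == 0x10000000, ARCH_PFN_OFFSET is 0x10000 and pfn_valid()
 * accepts only pfns in [0x10000, 0x10000 + max_mapnr).
 */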

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
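
/*
 * Example round trip (assuming PAGE_OFFSET == 0xc0000000 and
 * MEMORY_START == 0):
 *
 *	void *va = __va(0x01000000);		va == (void *)0xc1000000
 *	unsigned long pa = __pa(va);		pa == 0x01000000
 *
 * so __pa(__va(x)) == x for any address covered by the linear mapping.
 */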

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
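
/*
 * Worked example (size must be a power of two for the mask trick):
 *
 *	_ALIGN_UP(0x12345, 0x1000)   == 0x13000
 *	_ALIGN_DOWN(0x12345, 0x1000) == 0x12000
 *	_ALIGN_UP(0x12000, 0x1000)   == 0x12000	(already aligned)
 */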

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#else
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#endif
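
/*
 * Minimal usage sketch (hypothetical caller): the macro takes an
 * address, not a pointer, so callers cast first, e.g.
 *
 *	if (is_kernel_addr((unsigned long)ptr))
 *		pr_debug("%p is a kernel address\n", ptr);
 */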

#ifndef __ASSEMBLY__

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
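
/*
 * What the struct wrapper buys us (illustrative, not compiled): the
 * compiler now rejects accidental mixing of PTE values and integers:
 *
 *	pte_t pte = __pte(0x8000);		OK
 *	unsigned long v = pte_val(pte);		OK
 *	pte_t bad = 0x8000;			compile error here, but
 *						silently accepted with the
 *						plain typedefs below
 */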

/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* The PUD level exists only with 4K pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef pte_t real_pte_t;
#endif


#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif /* !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif

typedef struct { signed long pd; } hugepd_t;
#define HUGEPD_SHIFT_MASK	0x3f

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
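	/*
	 * A normal page-table pointer here is a kernel virtual address,
	 * so its top bit is set and it is negative as a signed long; an
	 * empty entry is zero.  A hugepd entry is stored with the top
	 * bit clear, so a strictly positive value identifies a hugepd.
	 */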
	return (hpd.pd > 0);
}

#define is_hugepd(pdep)	(hugepd_ok(*((hugepd_t *)(pdep))))
#else /* CONFIG_HUGETLB_PAGE */
#define is_hugepd(pdep)	0
#endif /* CONFIG_HUGETLB_PAGE */

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

typedef struct page *pgtable_t;

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */