#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page. We use 8 bytes for each PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
}

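/*
 * A pte is treated as a NUMA hinting entry when _PAGE_NUMA is set while
 * _PAGE_PRESENT is clear; pte_mknuma() below flips the bits that way.
 */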
#define pte_numa pte_numa
static inline int pte_numa(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}

#define pte_mknonnuma pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_NUMA;
	pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
	return pte;
}

#define pte_mknuma pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
	/*
	 * We should not set _PAGE_NUMA on non-present ptes. Also clear the
	 * present bit so that hash_page will return 1 and we collect this
	 * as a NUMA fault.
	 */
	if (pte_present(pte)) {
		pte_val(pte) |= _PAGE_NUMA;
		pte_val(pte) &= ~_PAGE_PRESENT;
	} else
		VM_BUG_ON(1);
	return pte;
}

#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
	return pte_numa(pmd_pte(pmd));
}

#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
}

#define pmd_mknuma pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
}

#else

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
#endif /* CONFIG_NUMA_BALANCING */

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte) {
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
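/* Note that pte_mkhuge() is a no-op: no PTE bit is set here to mark a huge page. */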
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}


/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update,
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}


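/*
 * __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS signals that this architecture
 * provides its own ptep_set_access_flags() rather than relying on the
 * asm-generic version.
 */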
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to alter the cache-control attributes of a page protection
 * value, e.g. to mark it as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
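/*
 * Look up the Linux pte for an effective address, transparently handling
 * huge-page entries; when the mapping is huge, *shift reports its
 * page-size shift (see lookup_linux_ptep() below for a typical caller).
 */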
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

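/*
 * Find the pte mapping a host virtual address and report the mapping size
 * through *pte_sizep. Returns NULL if there is no pte, or if the actual
 * mapping is smaller than the size the caller passed in.
 */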
static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */