#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
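
/*
 * Illustrative combination of the accessors above (sketch only; "pte" is
 * assumed to hold an entry read under the appropriate page table lock, and
 * handle_modified_page() is a hypothetical caller-side helper):
 *
 *	if (pte_present(pte) && pte_write(pte) && pte_dirty(pte))
 *		handle_modified_page(pte);
 */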

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
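
/*
 * Illustrative round trip through the helpers above (sketch only; "page"
 * and "prot" are assumed to be supplied by the caller):
 *
 *	pte_t pte = mk_pte(page, prot);
 *	unsigned long pfn = pte_pfn(pte);
 *	struct page *pg = pte_page(pte);
 *
 * mk_pte() combines the page frame number with the protection bits, while
 * pte_pfn()/pte_page() recover the frame number or the struct page again.
 */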

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
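
/*
 * Illustrative chaining of the modifiers above (sketch only; "pte" and
 * "vma" are assumed to come from the caller): make an entry writable,
 * dirty and young, then switch its protection while keeping the PFN:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */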


/* Insert a PTE: the top-level function is out of line.  It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);
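
/*
 * Typical use from a page table walker (sketch only; pte_offset_map_lock()
 * and pte_unmap_unlock() come from <linux/mm.h>, and "pmd", "page" and
 * "prot" are assumed to be supplied by the caller):
 *
 *	pte_t *ptep;
 *	spinlock_t *ptl;
 *
 *	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	set_pte_at(mm, addr, ptep, mk_pte(page, prot));
 *	pte_unmap_unlock(ptep, ptl);
 */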

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.  It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs.  We use
	 * the helper pte_update() which does an atomic update.  We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE.  If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e. same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.  This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode.  We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally.  That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to control the cacheability of a page protection value:
 * non-cached, non-cached write-combining, cached, and cached write-through.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
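
/*
 * Illustrative use of the cache-control macros (sketch only; "vma" is
 * assumed to describe a device mapping set up by a driver).  Device
 * registers are normally mapped non-cached and guarded:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * while a frame buffer may prefer the non-cached variant without the
 * guarded bit, which allows write combining:
 *
 *	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
 */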


struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this),
 * but most 64-bit archs actually perform a test.  What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
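
/*
 * Illustrative driver mmap() handler using the macro above (sketch only;
 * foo_mmap() and "dev_phys" are hypothetical, not part of this header):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = dev_phys >> PAGE_SHIFT;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *					  vma->vm_end - vma->vm_start,
 *					  vma->vm_page_prot);
 *	}
 */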

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
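
/*
 * Generic mm code calls this after making a new entry visible, roughly
 * (sketch of the generic fault path, not code from this header):
 *
 *	set_pte_at(mm, address, ptep, entry);
 *	update_mmu_cache(vma, address, ptep);
 */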

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */