#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
#  include <asm/pgtable-ppc64.h>
#else
#  include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page, using 8 bytes per PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET	(PTRS_PER_PTE * 8)

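/*
 * Illustrative sketch only (disabled; the helper name is hypothetical,
 * not a kernel API): because the slot bytes mirror the PTEs at a fixed
 * distance, the slot word for a PTE can be reached by adding
 * PTE_PAGE_HIDX_OFFSET to its address, assuming 8-byte PTEs.
 */
#if 0
static inline unsigned long *pte_slot_word(pte_t *ptep)
{
	return (unsigned long *)((char *)ptep + PTE_PAGE_HIDX_OFFSET);
}
#endif
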
#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)
{	return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)	{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

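/*
 * Usage sketch (disabled): the accessors above compose with plain C
 * logic; pte_was_written() is a hypothetical name for illustration.
 */
#if 0
static inline int pte_was_written(pte_t pte)
{
	/* Writable and marked dirty by a store through this mapping. */
	return pte_write(pte) && pte_dirty(pte);
}
#endif
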
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

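/*
 * Sketch (disabled): automatic NUMA balancing induces faults by leaving
 * _PAGE_PRESENT set while clearing _PAGE_USER, which is exactly the bit
 * pattern pte_protnone() tests for.  pte_mk_protnone() is a hypothetical
 * helper shown only to make the encoding concrete.
 */
#if 0
static inline pte_t pte_mk_protnone(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_USER;	/* user access now faults */
	return pte;			/* _PAGE_PRESENT left intact */
}
#endif
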
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte) {
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

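/*
 * Sketch (disabled): pfn_pte() and pte_pfn() are inverses for the PFN
 * part, assuming all protection bits sit below PTE_RPN_SHIFT.
 */
#if 0
static inline void pfn_pte_roundtrip_check(unsigned long pfn, pgprot_t prot)
{
	WARN_ON(pte_pfn(pfn_pte(pfn, prot)) != pfn);
}
#endif
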
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE);
	pte_val(pte) |= _PAGE_RO; return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) &= ~_PAGE_RO;
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

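/*
 * Sketch (disabled): each modifier takes and returns a pte_t by value,
 * so they nest naturally.  The combined helper name is hypothetical.
 */
#if 0
static inline pte_t pte_mkdirty_young(pte_t pte)
{
	return pte_mkyoung(pte_mkdirty(pte));
}
#endif
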
/* Insert a PTE, top-level function is out of line. It uses an inline
 * low level function in the respective pgtable-* files
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.  It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (ie, same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;

#ifdef CONFIG_PPC_BOOK3E_64
	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (is_kernel_addr(addr))
		mb();
#endif
#endif
}

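/*
 * Usage sketch (disabled): generic mm code reaches __set_pte_at()
 * through set_pte_at(); inserting a freshly allocated page might look
 * like this.  map_one_page() is a hypothetical name for illustration.
 */
#if 0
static inline void map_one_page(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, struct page *page, pgprot_t prot)
{
	set_pte_at(mm, addr, ptep, mk_pte(page, prot));
}
#endif
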
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to mark a page protection value as "uncacheable" or otherwise
 * adjust its cache-control bits.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

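/*
 * Usage sketch (disabled): a driver mapping device registers to
 * userspace would typically force an uncacheable mapping before
 * remapping.  map_device_regs() is a hypothetical name for illustration.
 */
#if 0
static inline int map_device_regs(struct vm_area_struct *vma,
				  unsigned long pfn, unsigned long size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif
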
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write,
		       struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   unsigned *shift);
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	if (!arch_irqs_disabled()) {
		pr_info("%s called with irqs enabled\n", __func__);
		dump_stack();
	}
	return __find_linux_pte_or_hugepte(pgdir, ea, shift);
}
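
/*
 * Usage sketch (disabled): the walk is only safe against concurrent
 * page-table teardown while interrupts are off, so callers bracket it
 * with local_irq_save()/restore() and consume the PTE inside that
 * window.  ea_to_pfn() is a hypothetical name for illustration.
 */
#if 0
static inline unsigned long ea_to_pfn(pgd_t *pgdir, unsigned long ea)
{
	unsigned long flags, pfn = 0;
	unsigned int shift;
	pte_t *ptep;

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(pgdir, ea, &shift);
	if (ptep && pte_present(*ptep))
		pfn = pte_pfn(*ptep);
	local_irq_restore(flags);

	return pfn;
}
#endif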
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */