#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

/*
 * We save the slot number & secondary bit in the second half of the
 * PTE page. We use 8 bytes per PTE entry.
 */
#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)

#ifndef __ASSEMBLY__

#include <asm/tlbflush.h>

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

#ifdef CONFIG_NUMA_BALANCING

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
}

#define pte_numa pte_numa
static inline int pte_numa(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
}

#define pte_mknonnuma pte_mknonnuma
static inline pte_t pte_mknonnuma(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_NUMA;
	pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED;
	return pte;
}

#define pte_mknuma pte_mknuma
static inline pte_t pte_mknuma(pte_t pte)
{
	/*
	 * We should not set _PAGE_NUMA on non-present PTEs. Also clear the
	 * present bit so that hash_page will return 1 and we collect this
	 * as a NUMA fault.
	 */
	if (pte_present(pte)) {
		pte_val(pte) |= _PAGE_NUMA;
		pte_val(pte) &= ~_PAGE_PRESENT;
	} else
		VM_BUG_ON(1);
	return pte;
}
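
/*
 * Illustrative sketch (not part of the original header): under automatic
 * NUMA balancing, a present PTE is turned into a hinting-fault entry and
 * later restored once the fault has been taken, roughly:
 *
 *	pte = pte_mknuma(pte);		- _PAGE_PRESENT cleared, _PAGE_NUMA set
 *	... the next access faults and is accounted as a NUMA fault ...
 *	pte = pte_mknonnuma(pte);	- present and accessed again
 */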

#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
}

#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
	return pte_numa(pmd_pte(pmd));
}

#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
				 pmd_t *pmdp)
{
	if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
		VM_BUG_ON(1);

	pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
}

#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
}

#define pmd_mknuma pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
}

#else

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}
#endif /* CONFIG_NUMA_BALANCING */

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte) {
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

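/*
 * Illustrative sketch (not part of the original header): building a PTE
 * for a struct page and reading the pfn back, assuming PAGE_KERNEL is an
 * appropriate protection here:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *	unsigned long pfn = pte_pfn(pte);	- pfn == page_to_pfn(page)
 */
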
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) {
	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

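/*
 * Illustrative sketch (not part of the original header): pte_modify()
 * keeps the bits in _PAGE_CHG_MASK and takes the rest from the new
 * pgprot, which is roughly what a protection change does per PTE:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */
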
/* Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level function in the respective pgtable-* files.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use
	 * the helper pte_update() which does an atomic update. We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE. If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (i.e., same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
#if _PAGE_HASHPTE != 0
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
#endif
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode: we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and we need to keep track that this PTE still needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
#endif
}


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

/*
 * Macros to alter the cacheability attributes in a page protection value:
 * mark it uncacheable, write-combining, write-through, or fully cached.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

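/*
 * Illustrative sketch (not part of the original header): a driver
 * exposing MMIO registers through mmap() would typically mark the
 * mapping uncacheable before inserting it:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */
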
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>


/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
		      unsigned long end, int write, struct page **pages, int *nr);

extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#define has_transparent_hugepage() 0
#endif
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);

static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva,
				       unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return NULL;

	/* Report back the size of the mapping that was found */
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;

	/* Fail if the mapping is smaller than the size the caller asked for */
	if (ps > *pte_sizep)
		return NULL;

	return ptep;
}
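
/*
 * Illustrative sketch (not part of the original header): a caller that
 * needs the Linux PTE for a host virtual address, and at least a
 * PAGE_SIZE mapping, might do:
 *
 *	unsigned long pte_size = PAGE_SIZE;
 *	pte_t *ptep = lookup_linux_ptep(mm->pgd, hva, &pte_size);
 *	if (ptep)
 *		... pte_size now holds the actual mapping size ...
 */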
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */