#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
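
/*
 * Worked example of the carve-up above, spelled out from the defines
 * (a reading aid only, not additional definitions):
 *
 *	ISA_IO_BASE  = KERN_IO_START
 *	ISA_IO_END   = KERN_IO_START + 64K
 *	PHB_IO_BASE  = KERN_IO_START + 64K
 *	PHB_IO_END   = KERN_IO_START + 2G
 *	IOREMAP_BASE = KERN_IO_START + 2G
 *	IOREMAP_END  = KERN_VIRT_START + KERN_VIRT_SIZE
 *
 * with KERN_IO_START itself placed half-way into the kernel virtual
 * region, i.e. at KERN_VIRT_START + KERN_VIRT_SIZE / 2.
 */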

#define vmemmap			((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors. It is
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages.
 */
#ifndef __real_pte

#ifdef CONFIG_STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */

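/*
 * Illustrative sketch of how the sub-page iterator above is meant to be
 * used, loosely modelled on the hash MMU flush paths; hpt_hash(), ssize
 * and the slot arithmetic below are assumptions taken from such callers,
 * not definitions made by this header:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		if (hidx & _PTEIDX_SECONDARY)
 *			hash = ~hash;
 *		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *		slot += hidx & _PTEIDX_GROUP_IX;
 *		...
 *	} pte_iterate_hashed_end();
 */
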
static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

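/*
 * Illustrative walk of a kernel virtual address using the helpers above
 * (a sketch only; pud_offset() is assumed to come from the accompanying
 * hash headers, and all validity checks are omitted):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
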
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);	\
	} while (0)
/*
 * For ptes we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap, and convert that to a pte to
 * find a matching pte in the Linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)

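/*
 * Illustrative round trip through the swap macros above (a sketch of what
 * the generic swap code ends up doing, shown only for orientation):
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);		(ORs in _PAGE_PTE)
 *	...
 *	entry  = __pte_to_swp_entry(pte);		(masks _PAGE_PTE off)
 *	type   = __swp_type(entry);
 *	offset = __swp_offset(entry);
 */
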
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */


static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)	pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)	pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use pgtable to store per-pmd
	 * specific information. So when we switch the pmd,
	 * we should also withdraw and deposit the pgtable.
	 */
	return true;
}
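
/*
 * For orientation: the generic THP move path (mm/huge_memory.c) is
 * expected to use the hook above roughly as follows; this is a sketch of
 * the caller, not something defined by this header:
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
 *		pgtable_t pgtable;
 *
 *		pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 *		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 *	}
 */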
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */