#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-ppc64-64k.h>
#else
#include <asm/pgtable-ppc64-4k.h>
#endif

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
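
/*
 * For illustration only (the actual values depend on the index sizes
 * pulled in from the per-page-size header above): if the four index
 * sizes sum to 32 bits on top of a 12-bit PAGE_SHIFT, PGTABLE_EADDR_SIZE
 * is 44 and PGTABLE_RANGE is 1UL << 44, i.e. a 16TB effective address
 * range per page table.
 */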

/* Some sanity checking */
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

/*
 * Define the address range of the kernel non-linear virtual area
 */

#ifdef CONFIG_PPC_BOOK3E
#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
#else
#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
#endif
#define KERN_VIRT_SIZE	PGTABLE_RANGE

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#ifdef CONFIG_PPC_BOOK3E
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
#else
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#endif
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
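
/*
 * A rough worked example (assuming the 16TB PGTABLE_RANGE mentioned
 * above, hash/Book3S layout): VMALLOC_START is 0xD000000000000000 and
 * VMALLOC_SIZE is 8TB, so VMALLOC_END is 0xD000080000000000.  On Book3E
 * the region starts at 0x8000000000000000 and vmalloc gets a quarter
 * of it instead.
 */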

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
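
/*
 * Again purely for illustration, with a 16TB KERN_VIRT_SIZE on a hash
 * (Book3S) kernel this carve-up works out to roughly:
 *
 *  KERN_IO_START = 0xD000080000000000 (ISA space, 64K)
 *  PHB_IO_BASE   = 0xD000080000010000 (PHB IO space, up to 2G of IO total)
 *  IOREMAP_BASE  = 0xD000080080000000 (ioremap space up to IOREMAP_END)
 *  IOREMAP_END   = 0xD000100000000000
 */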

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
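
/*
 * In other words, REGION_ID() just extracts the top nibble of an
 * effective address: REGION_ID(0xD000000000000000) is 0xD, and a user
 * address yields 0 (USER_REGION_ID).  KERNEL_REGION_ID is derived from
 * PAGE_OFFSET, VMALLOC_REGION_ID from wherever KERN_VIRT_START lands
 * for the MMU family in use.
 */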

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#ifdef CONFIG_PPC_BOOK3E
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#else
#define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
#endif
#define vmemmap			((struct page *)VMEMMAP_BASE)
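
/*
 * This is the base of the virtually mapped struct page array used by
 * CONFIG_SPARSEMEM_VMEMMAP: pfn_to_page(pfn) essentially becomes
 * "vmemmap + pfn", with the backing pages populated on demand by
 * vmemmap_populate().
 */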

/*
 * Include the PTE bits definitions
 */
#ifdef CONFIG_PPC_BOOK3S
#include <asm/pte-hash64.h>
#else
#include <asm/pte-book3e.h>
#endif
#include <asm/pte-common.h>

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

#ifndef __ASSEMBLY__

#include <linux/stddef.h>
#include <asm/tlbflush.h>

/*
 * This is the default implementation of various PTE accessors; it's
 * used in all cases except Book3S with 64K pages, where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#ifdef STRICT_MM_TYPECHECKS
#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#else
#define __real_pte(e,p)		(e)
#define __rpte_to_pte(r)	(__pte(r))
#endif
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

#ifdef CONFIG_PPC_HAS_HASH_64K
#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
#else
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
#endif

#endif /* __real_pte */
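
/*
 * Hash flush code uses the iterator above roughly like this (a sketch,
 * not a definitive call site; see flush_hash_page() for the real
 * pattern):
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vaddr, index, shift) {
 *		hash = hpt_hash(vaddr, shift, ssize);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		...
 *	} pte_iterate_hashed_end();
 *
 * With this default (non sub-page) implementation the loop body simply
 * runs once per PTE.
 */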

/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)		virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
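
/*
 * A typical kernel page table walk with the helpers above looks roughly
 * like this (see find_linux_pte() further down for the real thing,
 * including the empty-entry checks):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */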

/* Atomic PTE updates */
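/*
 * pte_update() atomically clears the bits in 'clr' and returns the old
 * PTE value.  On hash MMUs (CONFIG_PPC_STD_MMU_64) it also takes care of
 * flushing a previously inserted hashed PTE, which is why pte_clear(),
 * ptep_get_and_clear() and friends below are all built on top of it.
 */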
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old & ~clr);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this, but for the moment we override
 * these functions and force a TLB flush unconditionally.
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		andi.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
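
/*
 * In the swp_entry_t form above, the swap type therefore occupies bits
 * 1..6 and the swap offset starts at bit 8; the whole value is shifted
 * up by PTE_RPN_SHIFT when stored in a (non-present) PTE, keeping the
 * low PTE status bits clear.
 */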

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm))
				pt = pte_offset_kernel(pm, ea);
		}
	}
	return pt;
}

#ifdef CONFIG_HUGETLB_PAGE
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				 unsigned *shift);
#else
static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
					       unsigned *shift)
{
	if (shift)
		*shift = 0;
	return find_linux_pte(pgdir, ea);
}
#endif /* !CONFIG_HUGETLB_PAGE */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */