#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
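
/*
 * Worked example (illustrative; assumes the usual PAGE_SHIFT of 12 and
 * the PTE_SHIFT values from page_32.h): with 32-bit PTEs, PTE_SHIFT is
 * 10, so PGDIR_SHIFT = 22 and each of the 1024 pgdir entries maps 4MB;
 * with 64-bit PTEs, PTE_SHIFT is 9, so PGDIR_SHIFT = 21 and each of the
 * 2048 entries of the 8KB pgdir maps 2MB, matching the layout described
 * above.
 */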

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on other configurations; from here we can lay out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address; early ioremaps grow down from
 * there until mem_init(), at which point it becomes the top of the
 * vmalloc and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

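/*
 * Illustrative arithmetic (the consistent-area size here is an assumed
 * example value, not a requirement): without HIGHMEM and with a
 * non-coherent cache reserving a 16MB consistent-DMA area
 * (CONFIG_CONSISTENT_SIZE = 0x01000000),
 * IOREMAP_TOP = 0xfe000000 - 0x01000000 = 0xfd000000.
 */
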
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a "hole" of up to
 * 16MB after the physical memory until the kernel virtual memory
 * starts.  That means that any out-of-bounds memory accesses will
 * hopefully be caught.  The vmalloc() routines leave a hole of 4kB
 * between each vmalloced area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to
 * worry about clashes between our early calls to ioremap() (which grow
 * down from ioremap_base) and the VM area allocations (which grow up
 * from VMALLOC_START).  For this reason we have ioremap_bot to check
 * when we actually run into the mappings we set up during early boot
 * with the VM system.  This really does become a problem for machines
 * with large amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
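
/*
 * Worked example (illustrative): with high_memory at 0xc7800000, the
 * non-pinned case rounds 0xc7800000 + 0x1000000 = 0xc8800000 down to a
 * 16MB boundary, so VMALLOC_START = 0xc8000000 and the guard hole after
 * physical memory is 8MB.
 */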

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else /* CONFIG_6xx */
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * When flushing the TLB entry for a page, we also need to flush the
 * hash table entry.  flush_hash_pages is assembler (for speed) in
 * hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
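
/*
 * Illustrative use (a sketch; the helpers below are built exactly this
 * way): atomically clear the referenced bit and fetch the old value:
 *
 *	unsigned long old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *	if (old & _PAGE_ACCESSED)
 *		... the page was recently referenced ...
 */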

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong for our
 * hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
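
/*
 * Illustrative use (a sketch, not a verbatim copy of the generic VM):
 * write-protecting a source PTE while copying page tables at fork(),
 * so that a later write faults and triggers copy-on-write:
 *
 *	if (is_cow_mapping(vma->vm_flags))
 *		ptep_set_wrprotect(src_mm, addr, src_pte);
 */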

static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
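
/*
 * Illustrative sketch (assumes addr has a valid lowmem mapping and uses
 * the pud/pmd folding helpers from the asm-generic headers included
 * above): walking the two-level tree down to the kernel PTE for addr.
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset_kernel(pmd, addr);
 */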

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
 * _PAGE_HASHPTE bit (if used). -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
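
/*
 * Layout sketch (illustrative): the swap type lives in bits 0-4 of the
 * entry and the offset starts at bit 5; shifting the whole entry left
 * by 3 when storing it in a PTE keeps the low three PTE bits clear for
 * _PAGE_PRESENT and friends.  For example, type 2 with offset 0x100
 * gives entry.val = (0x100 << 5) | 2 = 0x2002, stored in the PTE as
 * 0x2002 << 3 = 0x10010.
 */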

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */