/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

#include <asm/pgtable-2level.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
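
/*
 * For illustration (value assumed, not taken from this header): if
 * high_memory ends up at 0xd0000000, then
 *
 *	VMALLOC_START = (0xd0000000 + 0x00800000) & ~0x007fffff
 *		      = 0xd0800000
 *
 * i.e. the vmalloc area starts exactly 8MB above the direct-mapped RAM.
 */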

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping.  This is particularly important for CPUs without high vectors.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture-dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
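
/*
 * For example, PAGE_READONLY above expands via _MOD_PROT to
 *
 *	__pgprot(pgprot_val(pgprot_user) | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
 *
 * i.e. the boot-time user protection value with the user, read-only and
 * execute-never bits ORed in.
 */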

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
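
/*
 * Illustration only (a hypothetical driver, not part of this header): an
 * mmap() method would typically apply one of the modifiers above before
 * remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */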

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
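
/*
 * The two pgprot_dmacoherent() variants above differ in memory type: with
 * CONFIG_ARM_DMA_MEM_BUFFERABLE (ARMv6+), coherent DMA memory is mapped as
 * Normal, non-cacheable ("bufferable"); otherwise it falls back to the
 * uncached (strongly-ordered) type.  Both variants are execute-never.
 */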

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
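
/*
 * The three digits encode, left to right, the VM_EXEC, VM_WRITE and
 * VM_READ bits of a vma's vm_flags; __P* entries are used for private
 * (copy-on-write) mappings and __S* for shared ones, via the generic
 * protection_map[] table in mm/mmap.c.  So, for example, a private
 * PROT_READ|PROT_EXEC mapping (__P101) resolves to __PAGE_READONLY_EXEC.
 */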

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)
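
/*
 * Each Linux pgd/pmd entry here is 8 bytes wide and covers a pair of
 * hardware first-level descriptors, which is why copy_pmd() and
 * pmd_clear() above operate on pmdp[0] and pmdp[1] together (see
 * asm/pgtable-2level.h for the layout).
 */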

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)	(end)

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
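
/*
 * Putting the accessors together, a minimal walk down to a kernel PTE
 * (illustration only, assuming the mapping exists; pud_offset() is the
 * folded no-op from asm-generic/4level-fixup.h):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */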

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

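/*
 * set_pte_at() below distinguishes kernel from user addresses: kernel
 * mappings (addr >= TASK_SIZE) are installed as global entries, while
 * user PTEs get PTE_EXT_NG so their TLB entries are tagged with the
 * current ASID, after first synchronising the I/D caches in case the
 * page is about to be executed.
 */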
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte)  \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
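
/*
 * For example, PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG) above expands to
 *
 *	static inline pte_t pte_mkold(pte_t pte)
 *	{ pte_val(pte) &= ~L_PTE_YOUNG; return pte; }
 */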

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
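
/*
 * Worked example (values assumed): __swp_entry(2, 0x100) encodes to
 *
 *	(2 << 3) | (0x100 << 9) = 0x20010
 *
 * whose low bit (L_PTE_PRESENT) is clear, so pte_present() is false for
 * the resulting PTE; __swp_type() and __swp_offset() recover 2 and 0x100.
 */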

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
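
/*
 * PTE_FILE_MAX_BITS is 29 because the three low bits of the 32-bit entry
 * carry the present/file encoding shown above, leaving 32 - 3 bits for
 * the file offset.
 */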

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */