#ifndef _ASM_M32R_PGTABLE_H
#define _ASM_M32R_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
/*
 * The Linux memory management assumes a three-level page table setup. On
 * the M32R, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * M32R mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the M32R page table tree.
 */
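
/*
 * Illustrative layout of the resulting two-level translation (a sketch
 * only; the actual shift and index widths are taken from
 * <asm/pgtable-2level.h> and <asm/page.h>, not from this diagram):
 *
 *	31 .. PGDIR_SHIFT | PGDIR_SHIFT-1 .. PAGE_SHIFT | PAGE_SHIFT-1 .. 0
 *	+-----------------+-----------------------------+------------------+
 *	|    pgd index    |          pte index          |   page offset    |
 *	+-----------------+-----------------------------+------------------+
 */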

/* CAUTION!: If you change macro definitions in this file, you might have to
 * change arch/m32r/mmu.S manually.
 */

#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/addrspace.h>
#include <asm/bitops.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
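
/*
 * Note that the vaddr argument is unused here: every ZERO_PAGE(vaddr)
 * resolves to virt_to_page(empty_zero_page), so all zero-mapped areas
 * share this single page.
 */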

#endif /* !__ASSEMBLY__ */

#ifndef __ASSEMBLY__
#include <asm/pgtable-2level.h>
#endif

#define pgtable_cache_init() do { } while (0)

#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0

#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_START KSEG2
#define VMALLOC_END KSEG3

/*
 * M32R TLB format
 *
 *	[0]       [1:19]         [20:23] [24:31]
 *	+-----------------------+----+-------------+
 *	|          VPN          |0000|    ASID     |
 *	+-----------------------+----+-------------+
 *	+-+---------------------+----+-+---+-+-+-+-+
 *	|0         PPN          |0000|N|AC |L|G|V| |
 *	+-+---------------------+----+-+---+-+-+-+-+
 *	                                RWX
 */

#define _PAGE_BIT_DIRTY 0 /* software: page changed */
#define _PAGE_BIT_FILE 0 /* when !present: nonlinear file
                                     mapping */
#define _PAGE_BIT_PRESENT 1 /* Valid: page is valid */
#define _PAGE_BIT_GLOBAL 2 /* Global */
#define _PAGE_BIT_LARGE 3 /* Large */
#define _PAGE_BIT_EXEC 4 /* Execute */
#define _PAGE_BIT_WRITE 5 /* Write */
#define _PAGE_BIT_READ 6 /* Read */
#define _PAGE_BIT_NONCACHABLE 7 /* Non cachable */
#define _PAGE_BIT_ACCESSED 8 /* software: page referenced */
#define _PAGE_BIT_PROTNONE 9 /* software: if not present */

#define _PAGE_DIRTY (1UL << _PAGE_BIT_DIRTY)
#define _PAGE_FILE (1UL << _PAGE_BIT_FILE)
#define _PAGE_PRESENT (1UL << _PAGE_BIT_PRESENT)
#define _PAGE_GLOBAL (1UL << _PAGE_BIT_GLOBAL)
#define _PAGE_LARGE (1UL << _PAGE_BIT_LARGE)
#define _PAGE_EXEC (1UL << _PAGE_BIT_EXEC)
#define _PAGE_WRITE (1UL << _PAGE_BIT_WRITE)
#define _PAGE_READ (1UL << _PAGE_BIT_READ)
#define _PAGE_NONCACHABLE (1UL << _PAGE_BIT_NONCACHABLE)
#define _PAGE_ACCESSED (1UL << _PAGE_BIT_ACCESSED)
#define _PAGE_PROTNONE (1UL << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE \
        ( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
        | _PAGE_DIRTY )
#define _KERNPG_TABLE \
        ( _PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED \
        | _PAGE_DIRTY )
#define _PAGE_CHG_MASK \
        ( PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY )
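
/*
 * _PAGE_CHG_MASK selects what must survive a protection change: the
 * physical frame bits (PTE_MASK) plus the software dirty and accessed
 * bits.  pte_modify() below relies on this; illustrative sketch:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *		(keeps pfn/dirty/accessed, replaces the permission bits)
 */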

#ifdef CONFIG_MMU
#define PAGE_NONE \
        __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
        __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ \
                | _PAGE_ACCESSED)
#define PAGE_COPY \
        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_COPY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY \
        __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_ACCESSED)

#define __PAGE_KERNEL \
        ( _PAGE_PRESENT | _PAGE_EXEC | _PAGE_WRITE | _PAGE_READ | _PAGE_DIRTY \
        | _PAGE_ACCESSED )
#define __PAGE_KERNEL_RO ( __PAGE_KERNEL & ~_PAGE_WRITE )
#define __PAGE_KERNEL_NOCACHE ( __PAGE_KERNEL | _PAGE_NONCACHABLE)

#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#else
#define PAGE_NONE __pgprot(0)
#define PAGE_SHARED __pgprot(0)
#define PAGE_SHARED_EXEC __pgprot(0)
#define PAGE_COPY __pgprot(0)
#define PAGE_COPY_EXEC __pgprot(0)
#define PAGE_READONLY __pgprot(0)
#define PAGE_READONLY_EXEC __pgprot(0)

#define PAGE_KERNEL __pgprot(0)
#define PAGE_KERNEL_RO __pgprot(0)
#define PAGE_KERNEL_NOCACHE __pgprot(0)
#endif /* CONFIG_MMU */

        /* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
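
/*
 * Illustrative reading of the tables above: the index is the xwr
 * (PROT_EXEC/PROT_WRITE/PROT_READ) triple of an mmap() request.  A
 * private PROT_READ|PROT_WRITE mapping selects __P011 == PAGE_COPY,
 * i.e. it starts out read-only so the first write faults and the page
 * can be copied (copy-on-write), while the shared case __S011 ==
 * PAGE_SHARED is writable from the start.
 */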

/* page table for 0-4MB for everybody */

#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
        return pte_val(pte) & _PAGE_READ;
}

static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
        return pte_val(pte) & _PAGE_FILE;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_READ;
        return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_EXEC;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_ACCESSED;
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_WRITE;
        return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
        pte_val(pte) |= _PAGE_READ;
        return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= _PAGE_EXEC;
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= _PAGE_DIRTY;
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= _PAGE_ACCESSED;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= _PAGE_WRITE;
        return pte;
}
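
/*
 * The pte_mk*()/pte_*protect() helpers above transform a pte value;
 * they do not touch the page table itself.  The caller writes the
 * result back, e.g. (illustrative fault-path sketch):
 *
 *	entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(entry)));
 *	set_pte_at(mm, address, ptep, entry);
 */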

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_WRITE, ptep);
}

/*
 * Macro and implementation to make a page protection uncachable.
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot |= _PAGE_NONCACHABLE;
        return __pgprot(prot);
}

#define pgprot_writecombine(prot) pgprot_noncached(prot)
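
/*
 * Illustrative use when a driver maps device memory to user space
 * (a sketch only; on this CPU write-combining simply degrades to
 * uncached):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */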

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) \
                | pgprot_val(newprot)));

        return pte;
}
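
/*
 * Because pte_modify() keeps only the _PAGE_CHG_MASK bits, a caller
 * such as mprotect() can swap in new permission bits without losing
 * the frame number or the dirty/accessed state; illustrative sketch:
 *
 *	entry = pte_modify(entry, vma->vm_page_prot);
 */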

/*
 * Set a pmd entry to point at the page table starting at *ptep.
 */

static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{
        pmd_val(*pmdp) = (((unsigned long) ptep) & PAGE_MASK);
}

#define pmd_page_kernel(pmd) \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd) (mem_map + ((pmd_val(pmd) >> PAGE_SHIFT) - PFN_BASE))
#endif /* !CONFIG_DISCONTIGMEM */

/* to find an entry in a page-table-directory. */
#define pgd_index(address) \
        (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pmd_index(address) \
        (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pte_index(address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *)pmd_page_kernel(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
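
/*
 * Putting the lookup macros together: a software walk to the pte of a
 * user address looks roughly as follows (a sketch; pmd_offset() comes
 * from <asm/pgtable-2level.h>, where the mid level is folded away):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset_map(pmd, addr);
 *		...examine or modify *pte...
 *		pte_unmap(pte);
 *	}
 */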

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x3f)
#define __swp_offset(x) ((x).val >> 10)
#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 2) | ((offset) << 10) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
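
/*
 * Resulting swap pte layout, derived from the macros above (shown for
 * illustration):
 *
 *	bits 31..10	swap offset
 *	bits  9..8	zero (_PAGE_PROTNONE, _PAGE_ACCESSED)
 *	bits  7..2	swap type (up to 64 swap areas)
 *	bit   1		zero (_PAGE_PRESENT), so a swap pte is never present
 *	bit   0		zero (_PAGE_FILE), distinguishes swap from file ptes
 */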

#endif /* !__ASSEMBLY__ */

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_PGTABLE_H */