/* pgtable.h: FR-V page table mangling
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Derived from:
 *	include/asm-m68knommu/pgtable.h
 *	include/asm-i386/pgtable.h
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <asm/mem-layout.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifndef __ASSEMBLY__
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
struct vm_area_struct;
#endif

#ifndef __ASSEMBLY__
#if defined(CONFIG_HIGHPTE)
typedef unsigned long pte_addr_t;
#else
typedef pte_t *pte_addr_t;
#endif
#endif

/*****************************************************************************/
/*
 * MMU-less operation case first
 */
#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1)	/* pages are always present on NO_MM */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE		__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_SHARED		__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_COPY		__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_READONLY		__pgprot(0)	/* these mean nothing to NO_MM */
#define PAGE_KERNEL		__pgprot(0)	/* these mean nothing to NO_MM */

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir		((pgd_t *) NULL)

#define pgtable_cache_init()		do {} while (0)
#define arch_enter_lazy_mmu_mode()	do {} while (0)
#define arch_leave_lazy_mmu_mode()	do {} while (0)
#define arch_enter_lazy_cpu_mode()	do {} while (0)
#define arch_leave_lazy_cpu_mode()	do {} while (0)

#else /* !CONFIG_MMU */
/*****************************************************************************/
/*
 * then MMU operation
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#ifndef __ASSEMBLY__
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
#endif

/*
 * we use 2-level page tables, folding the PMD (mid-level table) into the PGE (top-level entry)
 * [see Documentation/frv/mmu-layout.txt]
 *
 * Page Directory:
 *  - Size: 16KB
 *  - 64 PGEs per PGD
 *  - Each PGE holds 1 PUD and covers 64MB
 *
 * Page Upper Directory:
 *  - Size: 256B
 *  - 1 PUE per PUD
 *  - Each PUE holds 1 PMD and covers 64MB
 *
 * Page Mid-Level Directory:
 *  - Size: 256B
 *  - 1 PME per PMD
 *  - Each PME holds 64 STEs, all of which point to separate chunks of the same Page Table
 *  - All STEs are instantiated at the same time
 *
 * Page Table:
 *  - Size: 16KB
 *  - 4096 PTEs per PT
 *  - Each Linux PT is subdivided into 64 FR451 PTs, each of which holds 64 entries
 *
 * Pages:
 *  - Size: 16KB
 *
 * total PTEs
 *	= 1 PML4E * 64 PGEs * 1 PUE * 1 PME * 4096 PTEs
 *	= 1 PML4E * 64 PGEs * 64 STEs * 64 PTEs/FR451-PT
 *	= 262144 (or 256 * 1024)
 */
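/*
 * For illustration only (this example is not from the original header): with
 * PGDIR_SHIFT = 26 and 16KB pages (assuming PAGE_SHIFT = 14 to match the page
 * size above), a 32-bit virtual address splits into a 6-bit PGE index and a
 * 12-bit PTE index:
 *
 *	addr            = 0x04404000
 *	pgd_index(addr) = addr >> 26           = 1     (one of 64 PGEs)
 *	pte_index(addr) = (addr >> 14) & 4095  = 0x101 (one of 4096 PTEs)
 *
 * PGE 1 covers the 64MB region 0x04000000-0x07ffffff.
 */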
#define PGDIR_SHIFT		26
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD		64

#define PUD_SHIFT		26
#define PTRS_PER_PUD		1
#define PUD_SIZE		(1UL << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE - 1))
#define PUE_SIZE		256

#define PMD_SHIFT		26
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE - 1))
#define PTRS_PER_PMD		1
#define PME_SIZE		256

#define __frv_PT_SIZE		256

#define PTRS_PER_PTE		4096

#define USER_PGDS_IN_LAST_PML4	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS		(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	26
#define BOOT_USER_PGD_PTRS	(__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS	(PTRS_PER_PGD - BOOT_USER_PGD_PTRS)

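/*
 * Worked example (an added note; it assumes the usual FR-V PAGE_OFFSET of
 * 0xC0000000 rather than anything stated in this file):
 *
 *	USER_PGD_PTRS   = 0xC0000000 >> 26 = 48  (48 * 64MB = 3GB of user space)
 *	KERNEL_PGD_PTRS = 64 - 48          = 16  (1GB of kernel space)
 */
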
#ifndef __ASSEMBLY__

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(e)))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pmd_val(pud_val(pgd_val(e))))

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)				\
do {							\
	*(pteptr) = (pteval);				\
	asm volatile("dcf %M0" :: "U"(*pteptr));	\
} while(0)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

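/*
 * Added note: "dcf" is the FR-V data-cache flush instruction; flushing the
 * cacheline that holds the PTE is (presumably) what publishes the store to
 * the TLB-miss handlers that walk these tables in memory.  A typical caller
 * looks like:
 *
 *	set_pte_at(mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL));
 */
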
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pud is never bad, and a pud always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgd)	{ }

#define pgd_populate(mm, pgd, pud)	do { } while (0)
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pgd(pgdptr, pgdval)				\
do {							\
	memcpy((pgdptr), &(pgdval), sizeof(pgd_t));	\
	asm volatile("dcf %M0" :: "U"(*(pgdptr)));	\
} while(0)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *) pgd;
}

#define pgd_page(pgd)		(pud_page((pud_t){ pgd }))
#define pgd_page_vaddr(pgd)	(pud_page_vaddr((pud_t){ pgd }))

/*
 * allocating and freeing a pud is trivial: the 1-entry pud is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pud_alloc_one(mm, address)	NULL
#define pud_free(mm, x)			do { } while (0)
#define __pud_free_tlb(tlb, x)		do { } while (0)

/*
 * The "pud_xxx()" functions here are trivial for a folded two-level
 * setup: the pmd is never bad, and a pmd always exists (as it's folded
 * into the pud entry)
 */
static inline int pud_none(pud_t pud)		{ return 0; }
static inline int pud_bad(pud_t pud)		{ return 0; }
static inline int pud_present(pud_t pud)	{ return 1; }
static inline void pud_clear(pud_t *pud)	{ }

#define pud_populate(mm, pmd, pte)	do { } while (0)

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })

#define pud_page(pud)		(pmd_page((pmd_t){ pud }))
#define pud_page_vaddr(pud)	(pmd_page_vaddr((pmd_t){ pud }))

/*
 * (pmds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
extern void __set_pmd(pmd_t *pmdptr, unsigned long __pmd);

#define set_pmd(pmdptr, pmdval)			\
do {						\
	__set_pmd((pmdptr), (pmdval).ste[0]);	\
} while(0)

#define __pmd_index(address)	0

static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
{
	return (pmd_t *) dir + __pmd_index(address);
}

#define pte_same(a, b)		((a).pte == (b).pte)
#define pte_page(x)		(mem_map + ((unsigned long)(((x).pte >> PAGE_SHIFT))))
#define pte_none(x)		(!(x).pte)
#define pte_pfn(x)		((unsigned long)(((x).pte >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

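/*
 * For illustration only: a PTE is simply the page's physical address OR'd
 * with AMPR-format control bits, so the pfn round-trips and the protection
 * flags live in the low PAGE_SHIFT bits:
 *
 *	pte_pfn(pfn_pte(pfn, PAGE_KERNEL)) == pfn
 */
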
#define VMALLOC_VMADDR(x)	((unsigned long) (x))

#endif /* !__ASSEMBLY__ */

/*
 * control flags in AMPR registers and TLB entries
 */
#define _PAGE_BIT_PRESENT	xAMPRx_V_BIT
#define _PAGE_BIT_WP		DAMPRx_WP_BIT
#define _PAGE_BIT_NOCACHE	xAMPRx_C_BIT
#define _PAGE_BIT_SUPER		xAMPRx_S_BIT
#define _PAGE_BIT_ACCESSED	xAMPRx_RESERVED8_BIT
#define _PAGE_BIT_DIRTY		xAMPRx_M_BIT
#define _PAGE_BIT_NOTGLOBAL	xAMPRx_NG_BIT

#define _PAGE_PRESENT		xAMPRx_V
#define _PAGE_WP		DAMPRx_WP
#define _PAGE_NOCACHE		xAMPRx_C
#define _PAGE_SUPER		xAMPRx_S
#define _PAGE_ACCESSED		xAMPRx_RESERVED8	/* accessed if set */
#define _PAGE_DIRTY		xAMPRx_M
#define _PAGE_NOTGLOBAL		xAMPRx_NG

#define _PAGE_RESERVED_MASK	(xAMPRx_RESERVED8 | xAMPRx_RESERVED13)

#define _PAGE_FILE		0x002	/* set:pagecache unset:swap */
#define _PAGE_PROTNONE		0x000	/* If not present */

#define _PAGE_CHG_MASK		(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define __PGPROT_BASE \
	(_PAGE_PRESENT | xAMPRx_SS_16Kb | xAMPRx_D | _PAGE_NOTGLOBAL | _PAGE_ACCESSED)

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(__PGPROT_BASE)
#define PAGE_COPY		__pgprot(__PGPROT_BASE | _PAGE_WP)
#define PAGE_READONLY		__pgprot(__PGPROT_BASE | _PAGE_WP)

#define __PAGE_KERNEL		(__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY)
#define __PAGE_KERNEL_NOCACHE	(__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_NOCACHE)
#define __PAGE_KERNEL_RO	(__PGPROT_BASE | _PAGE_SUPER | _PAGE_DIRTY | _PAGE_WP)

#define MAKE_GLOBAL(x)		__pgprot((x) & ~_PAGE_NOTGLOBAL)

#define PAGE_KERNEL		MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)

#define _PAGE_TABLE		(_PAGE_PRESENT | xAMPRx_SS_16Kb)

#ifndef __ASSEMBLY__

/*
 * The FR451 can do execute protection by virtue of having separate TLB miss handlers for
 * instruction access and for data access.  However, we don't have enough reserved bits to say
 * "execute only", so we don't bother.  If you can read it, you can execute it and vice versa.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

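/*
 * Added note, assuming the usual Linux protection-map convention (it is not
 * spelled out in this file): the three digits are the VM_EXEC, VM_WRITE and
 * VM_READ bits of vm_flags.  Writable private mappings (__P010, __P011, ...)
 * get the write-protected PAGE_COPY so that the first write faults and can be
 * handled as copy-on-write; writable shared mappings get PAGE_SHARED directly.
 */
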
/*
 * Define this to warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)		(!pmd_val(x))
#define pmd_present(x)		(pmd_val(x) & _PAGE_PRESENT)
#define pmd_bad(x)		(pmd_val(x) & xAMPRx_SS)
#define pmd_clear(xp)		do { __set_pmd(xp, 0); } while(0)

#define pmd_page_vaddr(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#endif

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)		{ return (pte).pte & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return !((pte).pte & _PAGE_WP); }

static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte |= _PAGE_WP; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte &= ~_PAGE_WP; return pte; }

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return i;
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	unsigned long x = xchg(&ptep->pte, 0);
	asm volatile("dcf %M0" :: "U"(*ptep));
	return __pte(x);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_bit(_PAGE_BIT_WP, ptep);
	asm volatile("dcf %M0" :: "U"(*ptep));
}

/*
 * Macro to mark a page protection value as "uncacheable"
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NOCACHE))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry)	((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot)	pfn_pte((physpage) >> PAGE_SHIFT, pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte &= _PAGE_CHG_MASK;
	pte.pte |= pgprot_val(newprot);
	return pte;
}

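/*
 * For illustration only (not a caller taken from this header): pte_modify()
 * keeps the page address and the accessed/dirty bits (_PAGE_CHG_MASK) and
 * swaps in new protections, e.g. to make a present page read-only:
 *
 *	set_pte_at(mm, addr, ptep, pte_modify(*ptep, PAGE_READONLY));
 */
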
/* to find an entry in a page-table-directory. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_index_k(addr) pgd_index(addr)

/* Find an entry in the bottom-level page table. */
#define __pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic((pte), KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)	pte_offset_map((dir), (address))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
#endif

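/*
 * For illustration only, a minimal software walk from an mm to a
 * kernel-mapped PTE using the accessors above (error checks elided; this
 * snippet is not part of the original header):
 *
 *	pgd_t *pge = pgd_offset(mm, address);
 *	pud_t *pue = pud_offset(pge, address);
 *	pmd_t *pme = pmd_offset(pue, address);
 *	pte_t pte  = *pte_offset_kernel(pme, address);
 *
 * With the PUD and PMD levels folded, pue and pme point back into the same
 * PGE; update_mmu_cache() below does exactly this walk.
 */
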
/*
 * Handle swap and file entries
 * - the PTE is encoded in the following format:
 *	bit 0:		Must be 0 (!_PAGE_PRESENT)
 *	bit 1:		Type: 0 for swap, 1 for file (_PAGE_FILE)
 *	bits 2-6:	Swap type (__swp_type() masks five bits)
 *	bits 8-31:	Swap offset
 *	bits 2-31:	File pgoff
 */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

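/*
 * Worked example (added for clarity; values chosen arbitrarily):
 *
 *	__swp_entry(3, 0x1234).val = (3 << 2) | (0x1234 << 8) = 0x12340c
 *	__swp_type(entry)          = (0x12340c >> 2) & 0x1f   = 3
 *	__swp_offset(entry)        = 0x12340c >> 8            = 0x1234
 *
 * Bits 0 and 1 remain clear, so the entry is neither present nor a file PTE.
 */
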
static inline int pte_file(pte_t pte)
{
	return pte.pte & _PAGE_FILE;
}

#define PTE_FILE_MAX_BITS	29

#define pte_to_pgoff(PTE)	((PTE).pte >> 2)
#define pgoff_to_pte(off)	__pte((off) << 2 | _PAGE_FILE)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

/*
 * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
 */
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	unsigned long ampr;

	mm = current->mm;
	if (mm) {
		pgd_t *pge = pgd_offset(mm, address);
		pud_t *pue = pud_offset(pge, address);
		pmd_t *pme = pmd_offset(pue, address);

		ampr = pme->ste[0] & 0xffffff00;
		ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C |
			xAMPRx_V;
	} else {
		address = ULONG_MAX;
		ampr = 0;
	}

	asm volatile("movgs %0,scr0\n"
		     "movgs %0,scr1\n"
		     "movgs %1,dampr4\n"
		     "movgs %1,dampr5\n"
		     :
		     : "r"(address), "r"(ampr)
		     );
}

#ifdef CONFIG_PROC_FS
extern char *proc_pid_status_frv_cxnr(struct mm_struct *mm, char *buffer);
#endif

extern void __init pgtable_cache_init(void);

#endif /* !__ASSEMBLY__ */
#endif /* !CONFIG_MMU */

#ifndef __ASSEMBLY__
extern void __init paging_init(void);
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */