/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
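
/*
 * Illustrative example (an editor's sketch, not part of the original
 * header): VMALLOC_START rounds up to the next 8MB boundary above
 * high_memory.  The user-space demo below reproduces the arithmetic;
 * the high_memory value is a made-up placeholder.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define VMALLOC_OFFSET	(8*1024*1024)

int main(void)
{
	unsigned long high_memory = 0xc7f00000UL;	/* hypothetical */
	unsigned long start = (high_memory + VMALLOC_OFFSET)
				& ~(VMALLOC_OFFSET - 1UL);

	/* Prints 0xc8000000: the next 8MB boundary above high_memory. */
	printf("VMALLOC_START = %#lx\n", start);
	return 0;
}
#endif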

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
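
/*
 * Illustrative sketch (editor's addition, not kernel code): the
 * dirty-bit emulation described above, collapsed into one function.
 * The "hardware" write permission is modelled as a plain flag, and
 * the struct and helper names are invented for the example.
 */
#if 0	/* example only, never compiled */
struct example_pte {
	unsigned long linux_bits;	/* the "Linux pt" entry */
	int hw_write;			/* write permission in the "h/w pt" */
};

/* Called on a permission fault caused by a write access. */
static int example_write_fault(struct example_pte *pte)
{
	if (!(pte->linux_bits & L_PTE_PRESENT))
		return -1;		/* genuine fault: no mapping */
	if (!(pte->linux_bits & L_PTE_WRITE))
		return -1;		/* real protection violation */

	/*
	 * Writable but clean: mark the Linux PTE dirty (and young) and
	 * only now grant hardware write permission.  The caller must
	 * then flush the stale TLB entry, as ptep_set_access_flags()
	 * does in the real code.
	 */
	pte->linux_bits |= L_PTE_DIRTY | L_PTE_YOUNG;
	pte->hw_write = 1;
	return 0;
}
#endif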
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map; PGDIR_SHIFT determines the size of the area a first-level
 * entry maps.  With the pmd folded, both are 21 bits here, i.e. 2MB.
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
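
/*
 * Worked example (editor's addition): how a 32-bit virtual address is
 * carved up under this layout, assuming 4kB pages (PAGE_SHIFT == 12).
 * Bits 31..21 (11 bits) index the 2048-entry pgd; bits 20..12 (9 bits)
 * index the 512-entry "PTE" level; bits 11..0 are the page offset.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0xbf001234UL;	/* arbitrary example */

	printf("pgd index   = %lu\n", addr >> 21);		/* 1528 */
	printf("pte index   = %lu\n", (addr >> 12) & 511);	/* 1 */
	printf("page offset = %#lx\n", addr & 0xfff);		/* 0x234 */
	return 0;
}
#endif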
128
Hugh Dickins6119be02005-04-19 13:29:21 -0700129/*
130 * This is the lowest virtual address we can permit any user space
131 * mapping to be mapped at. This is particularly important for
132 * non-high vector CPUs.
133 */
134#define FIRST_USER_ADDRESS PAGE_SIZE
135
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136#define FIRST_USER_PGD_NR 1
137#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
138
139/*
George G. Davis4052ebb2006-09-22 18:36:38 +0100140 * section address mask and size definitions.
141 */
142#define SECTION_SHIFT 20
143#define SECTION_SIZE (1UL << SECTION_SHIFT)
144#define SECTION_MASK (~(SECTION_SIZE-1))
145
146/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 * ARMv6 supersection address mask and size definitions.
148 */
149#define SUPERSECTION_SHIFT 24
150#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
151#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))
152
153/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 * "Linux" PTE definitions.
155 *
156 * We keep two sets of PTEs - the hardware and the linux version.
157 * This allows greater flexibility in the way we map the Linux bits
158 * onto the hardware tables, and allows us to have YOUNG and DIRTY
159 * bits.
160 *
161 * The PTE table pointer refers to the hardware entries; the "Linux"
162 * entries are stored 1024 bytes below.
163 */
164#define L_PTE_PRESENT (1 << 0)
165#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
166#define L_PTE_YOUNG (1 << 1)
Russell Kingbb30f362008-09-06 20:04:59 +0100167#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */
168#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */
Russell King9cff96e2008-09-06 18:53:37 +0100169#define L_PTE_DIRTY (1 << 6)
170#define L_PTE_WRITE (1 << 7)
171#define L_PTE_USER (1 << 8)
172#define L_PTE_EXEC (1 << 9)
Lennert Buytenhek0e5fdca2006-12-02 00:03:47 +0100173#define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174
Russell Kingbb30f362008-09-06 20:04:59 +0100175/*
176 * These are the memory types, defined to be compatible with
177 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
Russell Kingbb30f362008-09-06 20:04:59 +0100178 */
179#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */
180#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */
181#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */
182#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */
183#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */
184#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */
Russell King639b0ae2008-09-06 21:07:45 +0100185#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */
Russell Kingbb30f362008-09-06 20:04:59 +0100186#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */
Russell King639b0ae2008-09-06 21:07:45 +0100187#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */
Russell Kingbb30f362008-09-06 20:04:59 +0100188#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */
189#define L_PTE_MT_MASK (0x0f << 2)
190
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191#ifndef __ASSEMBLY__
192
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193/*
Imre_Deak44b18692007-02-11 13:45:13 +0100194 * The pgprot_* and protection_map entries will be fixed up in runtime
195 * to include the cachable and bufferable bits based on memory policy,
196 * as well as any architecture dependent bits like global/ASID and SMP
197 * shared mapping bits.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 */
Russell Kingbb30f362008-09-06 20:04:59 +0100199#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200
Imre_Deak44b18692007-02-11 13:45:13 +0100201extern pgprot_t pgprot_user;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202extern pgprot_t pgprot_kernel;
203
Russell King8ec53662008-09-07 17:16:54 +0100204#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
Russell King8ec53662008-09-07 17:16:54 +0100206#define PAGE_NONE pgprot_user
207#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
208#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
209#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER)
210#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
211#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER)
212#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
213#define PAGE_KERNEL pgprot_kernel
214#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC)
215
216#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
217#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
218#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
219#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
220#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
221#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
222#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
Imre_Deak44b18692007-02-11 13:45:13 +0100223
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224#endif /* __ASSEMBLY__ */
225
226/*
227 * The table below defines the page protection levels that we insert into our
228 * Linux page table version. These get translated into the best that the
229 * architecture can perform. Note that on most ARM hardware:
230 * 1) We cannot do execute protection
231 * 2) If we could do execute protection, then read is implied
232 * 3) write implies read permissions
233 */
Imre_Deak44b18692007-02-11 13:45:13 +0100234#define __P000 __PAGE_NONE
235#define __P001 __PAGE_READONLY
236#define __P010 __PAGE_COPY
237#define __P011 __PAGE_COPY
Russell King8ec53662008-09-07 17:16:54 +0100238#define __P100 __PAGE_READONLY_EXEC
239#define __P101 __PAGE_READONLY_EXEC
240#define __P110 __PAGE_COPY_EXEC
241#define __P111 __PAGE_COPY_EXEC
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242
Imre_Deak44b18692007-02-11 13:45:13 +0100243#define __S000 __PAGE_NONE
244#define __S001 __PAGE_READONLY
245#define __S010 __PAGE_SHARED
246#define __S011 __PAGE_SHARED
Russell King8ec53662008-09-07 17:16:54 +0100247#define __S100 __PAGE_READONLY_EXEC
248#define __S101 __PAGE_READONLY_EXEC
249#define __S110 __PAGE_SHARED_EXEC
250#define __S111 __PAGE_SHARED_EXEC
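
/*
 * Reading aid (editor's note, not from the original source): the three
 * digits in __Pxxx/__Sxxx name the exec, write and read bits of the
 * requested protection, in that order, so e.g. __P011 is a private
 * read/write request (hence COPY, i.e. copy-on-write) and __S011 the
 * shared equivalent.  Generic mm code indexes these tables with the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED vm_flags, roughly as below.
 */
#if 0	/* example only, never compiled */
#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4
#define VM_SHARED	0x8

static unsigned int example_prot_index(unsigned long vm_flags)
{
	/* 0..7 picks a __Pxxx entry, 8..15 the matching __Sxxx one. */
	return vm_flags & (VM_SHARED | VM_EXEC | VM_WRITE | VM_READ);
}
#endif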

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

#define set_pte_at(mm,addr,ptep,pteval) do { \
	set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
 } while (0)
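
/*
 * Editor's note on the PTE_EXT_NG selection above: kernel mappings
 * (addr >= TASK_SIZE) are installed as global TLB entries, while user
 * mappings get the ARMv6 "not global" bit so they are tagged with the
 * current ASID and cannot leak into another process's address space.
 * The choice boils down to the sketch below; the TASK_SIZE value named
 * in the comment is a made-up example, not the real configuration.
 */
#if 0	/* example only, never compiled */
static unsigned int example_pte_ext(unsigned long addr)
{
	/* e.g. with TASK_SIZE around 0xbf000000 on a common split */
	return (addr >= TASK_SIZE) ? 0 : PTE_EXT_NG;
}
#endif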

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_special(pte)	(0)

/*
 * The following only work if pte_present() is not true.
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	30

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
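
/*
 * Editor's note: each PTE_BIT_FUNC() line above expands to an ordinary
 * inline accessor.  The first one, written out in full, is:
 */
#if 0	/* expansion shown for illustration only */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~L_PTE_WRITE;
	return pte;
}
#endif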

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
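
/*
 * Editor's note: both macros above clear the whole four-bit memory
 * type field before inserting the new type, so they behave the same
 * whatever type the prot started with.  The helper name below is
 * invented for the example.
 */
#if 0	/* example only, never compiled */
static unsigned long example_writecombine(unsigned long prot)
{
	/* clear the old type (say 0011, write-back)... */
	prot &= ~L_PTE_MT_MASK;
	/* ...then insert 0001, "bufferable only", i.e. write-combining */
	return prot | L_PTE_MT_BUFFERABLE;
}
#endif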

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	/*
	 * Mask off the low bits to get the base of the page holding the
	 * two hardware PTE tables, then step over them (512 entries of
	 * 4 bytes = 2048 bytes) to reach the Linux PTE tables.
	 */
	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd)	virt_to_page(__va(pmd_val(pmd)))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
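
/*
 * Editor's note: the mask above deliberately covers only EXEC, WRITE
 * and USER, so pte_modify() lets protection changes (e.g. mprotect())
 * touch just those bits; the memory type and the YOUNG/DIRTY state of
 * the old PTE are preserved.  The helper name below is invented for
 * the example.
 */
#if 0	/* example only, never compiled */
static pte_t example_make_readonly(void)
{
	/* a dirty, writable, user, write-back cached page... */
	pte_t pte = __pte(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			  L_PTE_WRITE | L_PTE_USER | L_PTE_MT_WRITEBACK);

	/* ...made read-only: WRITE is dropped, DIRTY and the type stay */
	return pte_modify(pte, __pgprot(L_PTE_USER));
}
#endif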

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines.
 */
#define __swp_type(x)		(((x).val >> 2) & 0x7f)
#define __swp_offset(x)		((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
397/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
398/* FIXME: this is not correct */
399#define kern_addr_valid(addr) (1)
400
401#include <asm-generic/pgtable.h>
402
403/*
404 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
405 */
406#define HAVE_ARCH_UNMAPPED_AREA
407
408/*
Randy Dunlap33bf5612005-09-13 01:25:50 -0700409 * remap a physical page `pfn' of size `size' with page protection `prot'
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 * into virtual address `from'
411 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412#define io_remap_pfn_range(vma,from,pfn,size,prot) \
413 remap_pfn_range(vma, from, pfn, size, prot)
414
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415#define pgtable_cache_init() do { } while (0)
416
417#endif /* !__ASSEMBLY__ */
418
Russell King002547b2006-06-20 20:46:52 +0100419#endif /* CONFIG_MMU */
420
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421#endif /* _ASMARM_PGTABLE_H */