/*
 * arch/arm/include/asm/pgtable.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/pgtable-2level.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff000000UL
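
/*
 * Worked example (illustrative values, not part of the interface): with
 * high_memory at 0xc8000000, VMALLOC_START rounds up past the guard hole
 * to the next 8MB boundary:
 *
 *	(0xc8000000 + 0x00800000) & ~0x007fffff == 0xc8800000
 *
 * leaving 0xc8000000..0xc87fffff unmapped to trap stray accesses.
 */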

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
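
/*
 * A minimal sketch of how the modifiers above compose; the expansion is
 * mechanical:
 *
 *	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
 *
 * becomes
 *
 *	__pgprot((pgprot_val(PAGE_KERNEL) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
 *
 * i.e. only the memory-type field changes; permission bits such as
 * L_PTE_XN and L_PTE_RDONLY carried in the original pgprot survive.
 */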

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
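
/*
 * The index into these tables is built from the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED flags, so a private writable mapping (__P011) deliberately maps
 * to __PAGE_COPY: the pte is installed read-only and the first write fault
 * triggers copy-on-write.  Roughly how generic mm code consumes the table
 * (a sketch, not a definition from this file):
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 */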

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)
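
/*
 * Note: on this 2-level configuration a Linux pmd covers two consecutive
 * hardware first-level descriptors, which is why copy_pmd() and pmd_clear()
 * above update pmdp[0] and pmdp[1] as a pair before flushing/cleaning.
 */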

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)	(end)

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
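
/*
 * A minimal sketch of a kernel page-table walk using the helpers above
 * (assumes a kernel address and !CONFIG_HIGHPTE; kernel_pte_for() is a
 * hypothetical name, not an API defined here):
 *
 *	static pte_t *kernel_pte_for(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pud_t *pud = pud_offset(pgd, addr);	// folded: no-op
 *		pmd_t *pmd = pmd_offset(pud, addr);	// folded: same slot
 *
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */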

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}
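
/*
 * The TASK_SIZE split above keeps kernel mappings global: user ptes are
 * installed with PTE_EXT_NG so the resulting TLB entries are non-global
 * (tagged per address space), and their I/D caches are synchronised
 * first; kernel ptes need neither.
 */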

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
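
/*
 * Illustrative use (a sketch of the mprotect()-style path, not a
 * definition from this file): pte_modify() swaps protection bits while
 * the mask leaves memory-type, dirty and young state intact, e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);	// sets L_PTE_RDONLY | L_PTE_XN
 */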

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset --------------------> <- type --> 0 0 0
 *
 * This gives us up to 63 swap files and 32GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
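
/*
 * Worked example with hypothetical values: encoding swap type 2 at page
 * offset 0x1234 using the macros above gives
 *
 *	__swp_entry(2, 0x1234).val == (2 << 3) | (0x1234 << 9) == 0x00246810
 *
 * Bits 2..0 stay clear, so the pte is !L_PTE_PRESENT and cannot be
 * confused with a live mapping; __swp_type() and __swp_offset() mask the
 * fields back out.
 */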

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29
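
/*
 * A hedged example of the file-pte encoding: a nonlinear mapping at page
 * offset 0x100 would be stored as
 *
 *	pgoff_to_pte(0x100) == __pte((0x100 << 3) | L_PTE_FILE)
 *
 * keeping bit 0 (L_PTE_PRESENT) clear, while pte_to_pgoff() recovers the
 * offset by shifting back down; hence the 29 usable offset bits.
 */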

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */