/*
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2005 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * traditional two-level paging structure
 */
/* PTE bits */
#ifdef CONFIG_X2TLB
# define PTE_MAGNITUDE	3	/* 64-bit PTEs on extended mode SH-X2 TLB */
#else
# define PTE_MAGNITUDE	2	/* 32-bit PTEs */
#endif
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PTE_SHIFT - PTE_MAGNITUDE)

/* PGD bits */
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGDIR_BITS	(32 - PGDIR_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))
#define PTRS_PER_PGD	(PAGE_SIZE / 4)
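
/*
 * Worked example (assuming CONFIG_PAGE_SIZE_4KB without CONFIG_X2TLB):
 * PTE_SHIFT = 12 and PTE_BITS = 10, so PGDIR_SHIFT = 22 and each PGD entry
 * maps a 4MB region. A 32-bit virtual address then splits into bits 31:22
 * (PGD index), 21:12 (PTE index) and 11:0 (page offset), with
 * PTRS_PER_PTE = PTRS_PER_PGD = 1024.
 */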

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define PTE_PHYS_MASK		(0x20000000 - PAGE_SIZE)

#define VMALLOC_START	(P3SEG)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)

/*
 * Linux PTEL encoding.
 *
 * Hardware and software bit definitions for the PTEL value (see below for
 * notes on SH-X2 MMUs and 64-bit PTEs):
 *
 * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4).
 *
 * - Bit 1 is the SH-bit, but is unused on SH-3 due to an MMU bug (the
 *   hardware PTEL value can't have the SH-bit set when MMUCR.IX is set,
 *   which is the default in cpu-sh3/mmu_context.h:MMU_CONTROL_INIT).
 *
 *   In order to keep this relatively clean, do not use these for defining
 *   SH-3 specific flags until all of the other unused bits have been
 *   exhausted.
 *
 * - Bit 9 is reserved by everyone and used by _PAGE_PROTNONE.
 *
 * - Bits 10 and 11 are low bits of the PPN that are reserved on >= 4K pages.
 *   Bit 10 is used for _PAGE_ACCESSED, bit 11 remains unused.
 *
 * - Bits 31, 30, and 29 remain unused by everyone and can be used for future
 *   software flags, although care must be taken to update _PAGE_CLEAR_FLAGS.
 *
 * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day.
 *
 * SH-X2 MMUs and extended PTEs
 *
 * SH-X2 supports an extended mode TLB with split data arrays due to the
 * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and
 * SZ bit placeholders still exist in data array 1, but are implemented as
 * reserved bits, with the real logic existing in data array 2.
 *
 * The downside to this is that we can no longer fit everything into a 32-bit
 * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus
 * side, this gives us quite a few spare bits to play with for future usage.
 */
/* Legacy and compat mode bits */
#define _PAGE_WT	0x001		/* WT-bit on SH-4, 0 on SH-3 */
#define _PAGE_HW_SHARED	0x002		/* SH-bit  : shared among processes */
#define _PAGE_DIRTY	0x004		/* D-bit   : page changed */
#define _PAGE_CACHABLE	0x008		/* C-bit   : cachable */
#ifndef CONFIG_X2TLB
# define _PAGE_SZ0	0x010		/* SZ0-bit : Size of page */
# define _PAGE_RW	0x020		/* PR0-bit : write access allowed */
# define _PAGE_USER	0x040		/* PR1-bit : user space access allowed */
# define _PAGE_SZ1	0x080		/* SZ1-bit : Size of page (on SH-4) */
#endif
#define _PAGE_PRESENT	0x100		/* V-bit   : page is valid */
#define _PAGE_PROTNONE	0x200		/* software: if not present */
#define _PAGE_ACCESSED	0x400		/* software: page referenced */
#define _PAGE_FILE	_PAGE_WT	/* software: pagecache or swap? */

/* Extended mode bits */
#define _PAGE_EXT_ESZ0		0x0010	/* ESZ0-bit: Size of page */
#define _PAGE_EXT_ESZ1		0x0020	/* ESZ1-bit: Size of page */
#define _PAGE_EXT_ESZ2		0x0040	/* ESZ2-bit: Size of page */
#define _PAGE_EXT_ESZ3		0x0080	/* ESZ3-bit: Size of page */

#define _PAGE_EXT_USER_EXEC	0x0100	/* EPR0-bit: User space executable */
#define _PAGE_EXT_USER_WRITE	0x0200	/* EPR1-bit: User space writable */
#define _PAGE_EXT_USER_READ	0x0400	/* EPR2-bit: User space readable */

#define _PAGE_EXT_KERN_EXEC	0x0800	/* EPR3-bit: Kernel space executable */
#define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
#define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */

/* Wrapper for extended mode pgprot twiddling */
#ifdef CONFIG_X2TLB
# define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
#else
# define _PAGE_EXT(x)		(0)
#endif
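
/*
 * Illustration (assuming CONFIG_X2TLB): the extended protection bits land
 * in the upper 32 bits of the 64-bit PTE, e.g.
 *
 *	_PAGE_EXT(_PAGE_EXT_USER_READ | _PAGE_EXT_USER_WRITE)
 *		== 0x0000060000000000ULL
 *
 * so pte_high carries the EPR/ESZ flags while the legacy bits stay in
 * pte_low. Without CONFIG_X2TLB the wrapper collapses to 0.
 */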

/* software: moves to PTEA.TC (Timing Control) */
#define _PAGE_PCC_AREA5	0x00000000	/* use BSC registers for area5 */
#define _PAGE_PCC_AREA6	0x80000000	/* use BSC registers for area6 */

/* software: moves to PTEA.SA[2:0] (Space Attributes) */
#define _PAGE_PCC_IODYN 0x00000001	/* IO space, dynamically sized bus */
#define _PAGE_PCC_IO8	0x20000000	/* IO space, 8 bit bus */
#define _PAGE_PCC_IO16	0x20000001	/* IO space, 16 bit bus */
#define _PAGE_PCC_COM8	0x40000000	/* Common Memory space, 8 bit bus */
#define _PAGE_PCC_COM16	0x40000001	/* Common Memory space, 16 bit bus */
#define _PAGE_PCC_ATR8	0x60000000	/* Attribute Memory space, 8 bit bus */
#define _PAGE_PCC_ATR16	0x60000001	/* Attribute Memory space, 16 bit bus */

/* Mask which drops unused bits from the PTEL value */
#ifdef CONFIG_CPU_SH3
#define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED | \
				 _PAGE_FILE | _PAGE_SZ1 | \
				 _PAGE_HW_SHARED)
#else
#define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
#endif

#define _PAGE_FLAGS_HARDWARE_MASK	(0x1fffffff & ~(_PAGE_CLEAR_FLAGS))

/* Hardware flags, page size encoding */
#if defined(CONFIG_X2TLB)
# if defined(CONFIG_PAGE_SIZE_4KB)
#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ0)
# elif defined(CONFIG_PAGE_SIZE_8KB)
#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ1)
# elif defined(CONFIG_PAGE_SIZE_64KB)
#  define _PAGE_FLAGS_HARD	_PAGE_EXT(_PAGE_EXT_ESZ2)
# endif
#else
# if defined(CONFIG_PAGE_SIZE_4KB)
#  define _PAGE_FLAGS_HARD	_PAGE_SZ0
# elif defined(CONFIG_PAGE_SIZE_64KB)
#  define _PAGE_FLAGS_HARD	_PAGE_SZ1
# endif
#endif
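
/*
 * E.g. with CONFIG_PAGE_SIZE_4KB on a non-X2 part, _PAGE_FLAGS_HARD is
 * simply _PAGE_SZ0; the X2 variants instead place the ESZ size encoding in
 * the upper half of the PTE via _PAGE_EXT().
 */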

#if defined(CONFIG_X2TLB)
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ3)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#  define _PAGE_SZHUGE	(_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3)
# endif
#else
# if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#  define _PAGE_SZHUGE	(_PAGE_SZ1)
# elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#  define _PAGE_SZHUGE	(_PAGE_SZ0 | _PAGE_SZ1)
# endif
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE	(_PAGE_FLAGS_HARD)
#endif

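/* Bits preserved across protection changes; see pte_modify() below. */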
#define _PAGE_CHG_MASK \
	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)

#ifndef __ASSEMBLY__

#if defined(CONFIG_X2TLB) /* SH-X2 TLB */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_USER_READ | \
					   _PAGE_EXT_USER_WRITE))

#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
					   _PAGE_EXT_USER_READ))

#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_USER_READ))

#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_USER_WRITE))

#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
					   _PAGE_EXT_USER_READ | \
					   _PAGE_EXT_USER_EXEC))

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_NOCACHE \
			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
				 _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_KERN_EXEC))

#define PAGE_KERNEL_PCC(slot, type) \
			__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
				 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
					   _PAGE_EXT_KERN_WRITE | \
					   _PAGE_EXT_KERN_EXEC) | \
				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
				 (type))

#elif defined(CONFIG_MMU) /* SH-X TLB */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_CACHABLE | _PAGE_ACCESSED | \
				 _PAGE_FLAGS_HARD)

#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)

#define PAGE_EXECREAD	PAGE_READONLY
#define PAGE_RWX	PAGE_SHARED
#define PAGE_WRITEONLY	PAGE_SHARED

#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_NOCACHE \
			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
				 _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | \
				 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD)

#define PAGE_KERNEL_PCC(slot, type) \
			__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
				 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
				 (type))
#else /* no mmu */
#define PAGE_NONE		__pgprot(0)
#define PAGE_SHARED		__pgprot(0)
#define PAGE_COPY		__pgprot(0)
#define PAGE_EXECREAD		__pgprot(0)
#define PAGE_RWX		__pgprot(0)
#define PAGE_READONLY		__pgprot(0)
#define PAGE_WRITEONLY		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define PAGE_KERNEL_NOCACHE	__pgprot(0)
#define PAGE_KERNEL_RO		__pgprot(0)
#define PAGE_KERNEL_PCC		__pgprot(0)
#endif

#endif /* __ASSEMBLY__ */

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and consider it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get..
 *
 * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
 * not only supporting separate execute, read, and write bits, but having
 * completely separate permission bits for user and kernel space.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

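/*
 * For example, a PROT_READ|PROT_WRITE private mapping resolves to __P011
 * (PAGE_COPY, i.e. read-only until copy-on-write), while the same protection
 * on a shared mapping resolves to __S011 (PAGE_SHARED).
 */
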
#ifndef __ASSEMBLY__

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
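/*
 * Note on the 64-bit (SH-X2) case below: the high word is written before
 * the low word, with a write barrier in between. The low word holds the
 * valid bit, so a concurrent walker never sees a present PTE paired with
 * a stale high word.
 */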
#ifdef CONFIG_X2TLB
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
#else
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#endif

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x))
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	(pmd_val(x) & ~PAGE_MASK)

#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
#define pte_page(x)	phys_to_page(pte_val(x) & PTE_PHYS_MASK)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_not_present(pte)	(!(pte_val(pte) & _PAGE_PRESENT))
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_file(pte)		(pte_val(pte) & _PAGE_FILE)

#ifdef CONFIG_X2TLB
#define pte_write(pte)	((pte).pte_high & _PAGE_EXT_USER_WRITE)
#else
#define pte_write(pte)	(pte_val(pte) & _PAGE_RW)
#endif

#define PTE_BIT_FUNC(h,fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; }

#ifdef CONFIG_X2TLB
/*
 * We cheat a bit in the SH-X2 TLB case. As the permission bits are
 * individually toggled (and user permissions are entirely decoupled from
 * kernel permissions), we attempt to couple them a bit more sanely here.
 */
PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
#else
PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW);
PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW);
PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE);
#endif

PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY);
PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
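
/*
 * For example, PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY) expands to:
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte.pte_low |= _PAGE_DIRTY;
 *		return pte;
 *	}
 */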

/*
 * Macro and implementation to make a page protection uncachable.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHABLE;
	return __pgprot(prot);
}

#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
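
/*
 * Illustrative (hypothetical) driver usage, e.g. before io_remap_pfn_range()
 * in an mmap() handler:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */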

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) |
			    pgprot_val(newprot)));
	return pte;
}

#define pmd_page_vaddr(pmd)	pmd_val(pmd)
#define pmd_page(pmd)		(virt_to_page(pmd_val(pmd)))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* Find an entry in the third-level page table.. */
#define pte_index(address) \
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
		((pte_t *)pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
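
/*
 * Sketch of a full table walk for a kernel virtual address, assuming the
 * usual pud/pmd folding stubs from asm-generic:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */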

#ifdef CONFIG_X2TLB
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
	       &(e), (e).pte_high, (e).pte_low)
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif

#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

/*
 * Encode and de-code a swap entry
 *
 * Constraints:
 *	_PAGE_FILE at bit 0
 *	_PAGE_PRESENT at bit 8
 *	_PAGE_PROTNONE at bit 9
 *
 * For the normal case, we encode the swap type into bits 0:7 and the
 * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
 * preserved bits in the low 32-bits and use the upper 32 as the swap
 * offset (along with a 5-bit type), following the same approach as x86
 * PAE. This keeps the logic quite simple, and allows for a full 32
 * PTE_FILE_MAX_BITS, as opposed to the 29 bits we're constrained with
 * in the pte_low case.
 *
 * As is evident from the Alpha code, if we ever get a 64-bit unsigned
 * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
 * much cleaner..
 *
 * NOTE: We should set ZEROs at the positions of the _PAGE_PRESENT
 * and _PAGE_PROTNONE bits.
 */
#ifdef CONFIG_X2TLB
#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){ (type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define pte_to_pgoff(pte)		((pte).pte_high)
#define pgoff_to_pte(off)		((pte_t) { _PAGE_FILE, (off) })

#define PTE_FILE_MAX_BITS		32
#else
#define __swp_type(x)			((x).val & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t){ (type) | (offset) << 10})

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 1 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 1 })

/*
 * Encode and decode a nonlinear file mapping entry
 */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 1)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 1) | _PAGE_FILE })
#endif
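
/*
 * Worked example for the 32-bit (non-X2) encoding: __swp_entry(2, 0x123)
 * gives a swp_entry_t value of 0x48c02, which __swp_entry_to_pte() shifts
 * up to a PTE value of 0x91804 (bits 0, 8 and 9 stay clear, as required
 * above). Decoding reverses this: pte_val >> 1 recovers 0x48c02, from
 * which __swp_type() yields 2 and __swp_offset() yields 0x123.
 */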

typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

struct mm_struct;

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#ifndef CONFIG_MMU
extern unsigned int kobjsize(const void *objp);
#endif /* !CONFIG_MMU */

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_SH_PGTABLE_H */