#ifndef __ASM_SH64_PGTABLE_H
#define __ASM_SH64_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgtable.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 */

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/page.h>
#include <linux/threads.h>

struct vm_area_struct;

extern void paging_init(void);

/* We provide our own get_unmapped_area to avoid cache synonym issues */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * Basically we have the same two-level page tables as the i386: the
 * logical three-level Linux page table layout, folded down to two.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * NEFF and NPHYS related defines.
 * FIXME: These need to be model-dependent.  For now this is OK: SH5-101
 * and SH5-103 implement 32 bits effective and 32 bits physical.  But
 * future implementations may extend beyond this.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

#define NPHYS		32
#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)
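
/*
 * Worked example (illustrative only): with NEFF == 32, NEFF_SIGN is
 * 1LL << 31 == 0x0000000080000000 and NEFF_MASK is -1LL << 32 ==
 * 0xffffffff00000000, so a 32-bit effective address is sign-extended
 * by OR-ing in NEFF_MASK whenever bit 31 is set.  The NPHYS variants
 * work the same way for physical addresses.
 */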

/* Typically 2-level is sufficient up to 32 bits of virtual address space;
   beyond that, 3-level would be appropriate. */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/* For 4k pages, this contains 512 entries, i.e. 9 bits worth of address. */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* top level: PGD. */
#define PGDIR_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

/* middle level: PMD. This doesn't do anything for the 2-level case. */
#define PTRS_PER_PMD	(1)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PMD_SHIFT	PGDIR_SHIFT
#define PMD_SIZE	PGDIR_SIZE
#define PMD_MASK	PGDIR_MASK
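
/*
 * Worked example for the 2-level case (illustrative, assuming 4k pages,
 * i.e. PAGE_SHIFT == 12):
 *
 *	PTRS_PER_PTE = 4096 / 8 = 512	(PTE_BITS = 12 - 3 = 9)
 *	PGDIR_SHIFT  = 12 + 9  = 21	(each PGD entry spans 2MB)
 *	PGD_BITS     = 32 - 21 = 11	(PTRS_PER_PGD = 2048)
 */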

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
/*
 * three-level asymmetric paging structure: PGD is top level.
 * The asymmetry comes from 32-bit pointers and 64-bit PTEs.
 */
/* bottom level: PTE. It's 9 bits = 512 pointers */
#define PTRS_PER_PTE	((1<<PAGE_SHIFT)/sizeof(unsigned long long))
#define PTE_MAGNITUDE	3	/* sizeof(unsigned long long) magnitude */
#define PTE_SHIFT	PAGE_SHIFT
#define PTE_BITS	(PAGE_SHIFT - PTE_MAGNITUDE)

/* middle level: PMD. It's 10 bits = 1024 pointers */
#define PTRS_PER_PMD	((1<<PAGE_SHIFT)/sizeof(unsigned long long *))
#define PMD_MAGNITUDE	2	/* sizeof(unsigned long long *) magnitude */
#define PMD_SHIFT	(PTE_SHIFT + PTE_BITS)
#define PMD_BITS	(PAGE_SHIFT - PMD_MAGNITUDE)

/* top level: PGD. It's 1 bit = 2 pointers */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PGD_BITS	(NEFF - PGDIR_SHIFT)
#define PTRS_PER_PGD	(1<<PGD_BITS)

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
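
/*
 * Worked example for the 3-level case (illustrative, again assuming
 * PAGE_SHIFT == 12):
 *
 *	PTE_BITS = 12 - 3 = 9	PMD_SHIFT   = 12 + 9  = 21
 *	PMD_BITS = 12 - 2 = 10	PGDIR_SHIFT = 21 + 10 = 31
 *	PGD_BITS = 32 - 31 = 1	PTRS_PER_PGD = 2
 *
 * The PMD level holds 32-bit pointers (hence PMD_MAGNITUDE == 2), which
 * is where the asymmetry with the 64-bit PTE entries comes from.
 */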

#else
#error "No defined number of page table levels"
#endif

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pgd(pgdptr, pgdval)	(*(pgdptr) = pgdval)
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
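
/*
 * Example of the sign extension performed by set_pte() (illustrative
 * values): a PTE whose raw value is 0x80001000 (bit NPHYS-1 set) is
 * stored as 0xffffffff80001000, while 0x40001000 is stored unchanged,
 * keeping stored PTEs consistent with the NPHYS_MASK convention above.
 */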

static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/*
 * PGD level access routines.
 *
 * Note1:
 * There's no need to use physical addresses since the tree walk is
 * performed entirely in software, until the PTE translation.
 *
 * Note 2:
 * A PGD entry can be uninitialized (_PGD_UNUSED), generically bad,
 * clear (_PGD_EMPTY) or present.  When present, its lower 3 nibbles
 * contain _KERNPG_TABLE, and, being a kernel virtual pointer, it must
 * also have bit 31 set.  Taking an arbitrary clear value of bit 31 set
 * to 0 and the lower 3 nibbles set to 0xFFF (_PGD_EMPTY), any other
 * value is a bad pgd that must be reported via printk().
 *
 */
#define _PGD_EMPTY		0x0

#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline int pgd_none(pgd_t pgd)	{ return 0; }
static inline int pgd_bad(pgd_t pgd)	{ return 0; }
#define pgd_present(pgd)	((pgd_val(pgd) & _PAGE_PRESENT) ? 1 : 0)
#define pgd_clear(xx)		do { } while(0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define pgd_present(pgd_entry)	(1)
#define pgd_none(pgd_entry)	(pgd_val((pgd_entry)) == _PGD_EMPTY)
/* TODO: Think later about what a useful definition of 'bad' would be now. */
#define pgd_bad(pgd_entry)	(0)
#define pgd_clear(pgd_entry_p)	(set_pgd((pgd_entry_p), __pgd(_PGD_EMPTY)))

#endif


#define pgd_page(pgd_entry)	((unsigned long) (pgd_val(pgd_entry) & PAGE_MASK))

/*
 * PMD defines. Middle level.
 */

/* PGD to PMD dereferencing */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
#define __pmd_offset(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, addr) \
		((pmd_t *) ((pgd_val(*(dir))) & PAGE_MASK) + __pmd_offset((addr)))
#endif

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY		0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_kernel(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
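
/*
 * Illustrative sketch (not an API of this file): how the macros above
 * compose into the full software tree walk for a kernel virtual address.
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *	struct page *page;
 *
 *	if (pte_present(*pte))
 *		page = pte_page(*pte);
 *
 * In the 2-level configuration pmd_offset() simply casts the PGD slot,
 * so the middle step folds away at compile time.
 */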

/* Round it up! */
#define USER_PTRS_PER_PGD	((TASK_SIZE+PGDIR_SIZE-1)/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#ifndef __ASSEMBLY__
#define VMALLOC_END	0xff000000
#define VMALLOC_START	0xf0000000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by SWP_ENTRY and SWP_TYPE below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL
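
/*
 * Sanity check on the mask above (illustrative): the complement of
 * _PAGE_FLAGS_HARDWARE_MASK in the low bits is 0xc24, i.e. exactly
 * _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SHARED | _PAGE_PRESENT -- the
 * purely software flags -- so masking on refill drops the s/w bits and
 * nothing else.
 */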

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif
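
/*
 * Reading the SZ encoding off the definitions above: SZ1:SZ0 == 01
 * selects 64k, 10 selects 1MB and 11 selects 512MB, with 00 presumably
 * left for the base page size.  _PAGE_SZHUGE is thus just the SZ
 * pattern for the configured huge page size.
 */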

/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 *
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_USER | \
				 _PAGE_SHARED)
/* We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack. */
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER | _PAGE_EXECUTE)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_CACHABLE | \
				 _PAGE_ACCESSED | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)


/*
 * In ST50 we have full permissions (Read/Write/Execute/Shared).
 * Just match'em all. These are for mmap(), therefore all at least
 * User/Cachable/Present/Accessed. No point in making Fault on Write.
 */
#define __MMAP_COMMON	(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED)
/* sxwr */
#define __P000	__pgprot(__MMAP_COMMON)
#define __P001	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P010	__pgprot(__MMAP_COMMON)
#define __P011	__pgprot(__MMAP_COMMON | _PAGE_READ)
#define __P100	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P101	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)
#define __P110	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE)
#define __P111	__pgprot(__MMAP_COMMON | _PAGE_EXECUTE | _PAGE_READ)

#define __S000	__pgprot(__MMAP_COMMON | _PAGE_SHARED)
#define __S001	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ)
#define __S010	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_WRITE)
#define __S011	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_READ | _PAGE_WRITE)
#define __S100	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE)
#define __S101	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ)
#define __S110	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_WRITE)
#define __S111	__pgprot(__MMAP_COMMON | _PAGE_SHARED | _PAGE_EXECUTE | _PAGE_READ | _PAGE_WRITE)
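
/*
 * Note the pattern above (P/S is private/shared, the digits are x, w, r):
 * no private (__Pxxx) entry ever includes _PAGE_WRITE, even when 'w' is
 * requested.  The first write to a private mapping therefore faults,
 * which is what lets the generic mm do copy-on-write; shared (__Sxxx)
 * mappings get _PAGE_WRITE directly.
 */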

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x)	__pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t *pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note1:
 * It's the tree walk leaf.  This is the physical address to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the page
 * is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
 * out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
 * left for us to select.  If we force bit[7]==0 when swapped out, we could use
 * the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
 * we force bit[7]==1 when swapped out, we can use all zeroes to indicate
 * empty.  This is convenient, because the page tables get cleared to zero
 * when they are allocated.
 */
#define _PTE_EMPTY	0x0
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to: take the absolute physical
 * address, make it relative, and translate that to an index.
 */
#define pte_pagenr(x)	(((unsigned long) (pte_val(x)) - \
			 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)	(mem_map + pte_pagenr(x))

/*
 * Return the number of (down-rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
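
/* e.g. with 4k pages this is x >> 8, since 256 pages make up one MB. */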

/*
 * The following have defined behaviour only if pte_present() is true.
 */
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_READ; }
static inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_EXECUTE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_READ)); return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_EXECUTE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }

static inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_READ)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_EXECUTE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})
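
/*
 * Illustrative use of mk_pte() (hypothetical caller, not part of this
 * file): the (page - mem_map) << PAGE_SHIFT term recovers the relative
 * physical address, and OR-ing in __MEMORY_START makes it absolute
 * before the protection bits are added.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 */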

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

typedef pte_t *pte_addr_t;

extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
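
/*
 * Bit layout implied by the macros above (illustrative): a 6-bit swap
 * type is split so that its low 2 bits land in PTE bits [1:0] and its
 * high 4 bits in PTE bits [6:3]; the offset occupies bits [31:8].
 * Bit 2 (_PAGE_PRESENT) is never set by __swp_entry(), so a swapped-out
 * PTE can always be told apart from a present one.
 */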

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte))
#define pgoff_to_pte(off)	((pte_t) { (off) | _PAGE_FILE })

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#endif /* !__ASSEMBLY__ */

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

#define pte_pfn(x)		(((unsigned long)((x).pte)) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH64_PGTABLE_H */