/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <linux/swap.h>
#include <asm/page.h>
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM.  Declared in head.S */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables.  At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor.  A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ	__HVM_PTE_R
#define _PAGE_WRITE	__HVM_PTE_W
#define _PAGE_EXECUTE	__HVM_PTE_X
#define _PAGE_USER	__HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT	(1<<0)
#define _PAGE_DIRTY	(1<<1)
#define _PAGE_ACCESSED	(1<<2)

/*
 * _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
 * _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
 * So we can overload the bit...
 */
#define _PAGE_FILE	_PAGE_DIRTY /* set: pagecache, unset = swap */
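
/*
 * Illustrative decode of bit 1 under this overload (a sketch, not from
 * the original source):
 *
 *	_PAGE_PRESENT = 1, bit 1 = 1	-> present and dirty
 *	_PAGE_PRESENT = 0, bit 1 = 1	-> file PTE (see pte_file() below)
 *	_PAGE_PRESENT = 0, bit 1 = 0	-> swap PTE (or none, if all zero)
 */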

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID	_PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space; a 4KB table
 * thus maps the full 4GB.
 */
#define PGDIR_SHIFT	22
#define PTRS_PER_PGD	1024

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE	1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE	256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE	64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE	16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE	4
#endif
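
/*
 * Sanity check on the values above (a sketch, not from the original
 * source): each PGD entry spans PGDIR_SIZE = 2^22 bytes, so a PTE
 * table needs PGDIR_SIZE / PAGE_SIZE entries, e.g.:
 *
 *	4KB pages:	2^22 / 2^12 = 1024
 *	64KB pages:	2^22 / 2^16 = 64
 *	1MB pages:	2^22 / 2^20 = 4
 */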

/*  Any bigger and the PTE disappears.  */
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
		pgd_val(e))

/*
 * Page Protection Constants.  Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_dflt_cache_att)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY	PAGE_READONLY
#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC	PAGE_EXEC
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)

/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable.  If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF	(CACHE_DEFAULT << 6)

/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000	/* Write-only copy-on-write */
#define __P011 __P001	/* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
			_PAGE_READ | CACHEDEF)
#define __P110 __P100	/* Write/execute copy-on-write */
#define __P111 __P101	/* Read/Write/Execute, copy-on-write */

/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
			_PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
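
/*
 * Illustrative note (a sketch, not from the original source): the
 * generic mm code indexes protection_map[] with the 3-bit r/w/x value
 * from mmap() prot flags, picking __Pxxx for MAP_PRIVATE and __Sxxx
 * for MAP_SHARED.  E.g. a private PROT_READ|PROT_WRITE mapping selects
 * __P011, which aliases the read-only __P001 so that the first write
 * faults and triggers copy-on-write.
 */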

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];	/* located in head.S */

/* Seems to be zero even in architectures where the zero page is firewalled? */
#define FIRST_USER_ADDRESS 0
#define pte_special(pte)	0
#define pte_mkspecial(pte)	(pte)

/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
	(_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* should really be using pte_exec, if it weren't declared later. */
	if (pte_present_exec_user(pteval))
		sync_icache_dcache(pteval);

	*ptep = pteval;
}

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits.  For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD	0x7
#define _NULL_PTE	0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
	pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_val(*ptep) = _NULL_PTE;
}

#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
/**
 * pmd_index - returns the index of the entry in the PMD page
 * which would control the given virtual address
 */
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#endif

/**
 * pgd_index - returns the index of the entry in the PGD page
 * which would control the given virtual address
 *
 * This returns the *index* for the address in the pgd_t
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/*
 * pgd_offset - find an offset in a page-table-directory
 */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/*
 * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
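
/*
 * Worked example (a sketch, not from the original source): for the
 * virtual address 0x12345678, pgd_index() yields 0x12345678 >> 22 =
 * 0x48, i.e. entry 72 of the 1024-entry PGD, which covers the 4MB
 * region starting at 0x12000000.
 */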

/**
 * pmd_none - check if pmd_entry is mapped
 * @pmd: pmd entry
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none.  We maybe
 * save an inline instruction by defining it this
 * way, instead of simply "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad".  That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
	return 0;
}

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_pgtable(pmd) pmd_page(pmd)

/**
 * pte_none - check if pte is mapped
 * @pte: pte_t entry
 */
static inline int pte_none(pte_t pte)
{
	return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXECUTE;
	return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))
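
/*
 * Illustrative use (a sketch, not from the original source): with 4KB
 * pages, pfn_pte(0x1234, PAGE_KERNEL) builds the PTE value
 * (0x1234 << 12) | pgprot_val(PAGE_KERNEL), i.e. the physical frame
 * number in the high bits and permission/cache bits in the low bits;
 * pte_pfn() below simply shifts the low bits back off.
 */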

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

/*
 * set_pte_at - update page table and do whatever magic may be
 * necessary to make the underlying hardware/firmware take note.
 *
 * VM may require a virtual instruction to alert the MMU.
 */
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)

/*
 * May need to invoke the virtual machine as well...
 */
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

/*
 * pte_offset_map - returns the linear address of the page table entry
 * corresponding to an address
 */
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))

#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)

/* pte_offset_kernel - kernel version of pte_offset */
#define pte_offset_kernel(dir, address) \
	((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
		+ __pte_offset(address))

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
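
/*
 * Illustrative walk (a sketch, not from the original source): with the
 * pmd folded into the pgd (pgtable-nopmd above), a software lookup is
 * two steps: pgd_offset() indexes the PGD by address bits 31:22, then
 * pte_offset_*() indexes the L2 table via __pte_offset(), which with
 * 4KB pages is (address >> 12) & 0x3ff, i.e. address bits 21:12.
 */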

/* Nothing special about IO remapping at this point */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

/* I think this is in case we have page table caches; needed by init/main.c */
#define pgtable_cache_init()	do { } while (0)

/*
 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the
 * PTE is interpreted as swap information.  Depending on the _PAGE_FILE
 * bit, the remaining free bits are either interpreted as a file offset
 * or a swap type/offset tuple.  Rather than have the TLB fill handler
 * test _PAGE_PRESENT, we're going to reserve the permissions bits
 * and set them to all zeros for swap entries, which speeds up the
 * miss handler at the cost of 3 bits of offset.  That trade-off can
 * be revisited if necessary, but Hexagon processor architecture and
 * target applications suggest a lot of TLB misses and not much swap space.
 *
 * Format of swap PTE:
 *	bit	0:	Present (zero)
 *	bit	1:	_PAGE_FILE (zero)
 *	bits	2-6:	swap type (arch independent layer uses 5 bits max)
 *	bits	7-9:	bits 2:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
 *	bits	13-31:	bits 21:3 of swap offset
 *
 * Format of file PTE:
 *	bit	0:	Present (zero)
 *	bit	1:	_PAGE_FILE (one)
 *	bits	2-9:	bits 7:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
 *	bits	13-31:	bits 26:8 of file offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */
#define PTE_FILE_MAX_BITS	27

/* Used for swap PTEs */
#define __swp_type(swp_pte)	(((swp_pte).val >> 2) & 0x1f)

#define __swp_offset(swp_pte) \
	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))

#define __swp_entry(type, offset) \
	((swp_entry_t) { \
		((type << 2) | \
		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
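
/*
 * Worked round trip (a sketch, not from the original source):
 * __swp_entry(2, 0x9) puts the type in bits 2-6 (0x8), offset bits 2:0
 * in bits 7-9 (0x80), and offset bits 21:3 in bits 13-31 (0x2000),
 * giving a PTE value of 0x2088.  Bits 0-1 and 10-12 stay zero, so the
 * miss handler sees no permissions.  __swp_type() and __swp_offset()
 * recover 2 and 0x9 respectively.
 */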

/* Used for file PTEs */
#define pte_file(pte) \
	((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)

#define pte_to_pgoff(pte) \
	(((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))

#define pgoff_to_pte(off) \
	((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2) \
	| _PAGE_FILE) })
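
/*
 * Worked check (a sketch, not from the original source):
 * pgoff_to_pte(0x123) yields (0x100 << 5) | (0x23 << 2) | _PAGE_FILE
 * = 0x208e; bit 0 (Present) is clear and bit 1 (_PAGE_FILE) is set,
 * so pte_file() is true, and pte_to_pgoff() recovers 0x123.
 */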

/* Oh boy.  There are a lot of possible arch overrides found in this file. */
#include <asm-generic/pgtable.h>

#endif