/*
 * linux/include/asm-xtensa/pgtable.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
/* Assertions. */

#ifdef CONFIG_MMU

#if (XCHAL_MMU_RINGS < 2)
# error Linux build assumes at least 2 ring levels.
#endif

#if (XCHAL_MMU_CA_BITS != 4)
# error We assume exactly four bits for CA.
#endif

#if (XCHAL_MMU_SR_BITS != 0)
# error We have no room for SR bits.
#endif

/*
 * Use the first min-wired way for mapping page-table pages.
 * Page coloring requires a second min-wired way.
 */

#if (XCHAL_DTLB_MINWIRED_SETS == 0)
# error Need a min-wired way for mapping page-table pages
#endif

#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY)

#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2
#  define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1)
#  define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2)
# else
#  error Page coloring requires its own wired dtlb way!
# endif
#endif

#endif /* CONFIG_MMU */
/*
 * We only use two ring levels, user and kernel space.
 */

#define USER_RING	1	/* user ring level */
#define KERNEL_RING	0	/* kernel ring level */
/*
 * The Xtensa architecture port of Linux has a two-level page table system,
 * i.e. the logical three-level Linux page table layout is folded.
 * Each task has the following memory page tables:
 *
 *   PGD table (page directory), ie. 3rd-level page table:
 *	One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
 *	(Architectures that don't have the PMD folded point to the PMD tables)
 *
 *	The pointer to the PGD table for a given task can be retrieved from
 *	the task structure (struct task_struct*) t, e.g. current():
 *	  (t->mm ? t->mm : t->active_mm)->pgd
 *
 *   PMD tables (page middle-directory), ie. 2nd-level page tables:
 *	Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
 *
 *   PTE tables (page table entry), ie. 1st-level page tables:
 *	One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
 *	invalid_pte_table for absent mappings.
 *
 * The individual pages are 4 kB in size, with the special empty_zero_page
 * used for zero-filled mappings.
 */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

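/*
 * For illustration (a worked example, not part of this header): with 4 kB
 * pages (PAGE_SHIFT == 12), a virtual address such as 0x12345678 decomposes
 * as
 *
 *	pgd index: 0x12345678 >> PGDIR_SHIFT                       == 0x48
 *	pte index: (0x12345678 >> PAGE_SHIFT) & (PTRS_PER_PTE - 1) == 0x345
 *	offset:    0x12345678 & ~PAGE_MASK                         == 0x678
 */
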
/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE		1024
#define PTRS_PER_PTE_SHIFT	10
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		1024
#define PGD_ORDER		0
#define PMD_ORDER		0
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	XCHAL_SEG_MAPPABLE_VADDR
#define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)

/* Virtual memory area. We keep some distance from other memory regions to
 * be on the safe side. We also use this area for cache aliasing.
 */

// FIXME: virtual memory area must be configuration-dependent

#define VMALLOC_START	0xC0000000
#define VMALLOC_END	0xC7FF0000
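/* The region above spans 0x07FF0000 bytes, i.e. 64 kB short of 128 MB. */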

/* Xtensa Linux config PTE layout (when present):
 *	31-12:	PPN
 *	11-6:	Software
 *	5-4:	RING
 *	3-0:	CA
 *
 * Similar to the Alpha and MIPS ports, we need to keep track of the ref
 * and mod bits in software. We have a software "you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page. By the same token, we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page.
 *
 * See further below for the PTE layout of swapped-out pages.
 */

#define _PAGE_VALID	(1<<0)	/* hardware: page is accessible */
#define _PAGE_WRENABLE	(1<<1)	/* hardware: page is writable */

/* None of these cache modes include MP coherency: */
#define _PAGE_NO_CACHE	(0<<2)	/* bypass, non-speculative */
#if XCHAL_DCACHE_IS_WRITEBACK
# define _PAGE_WRITEBACK (1<<2)	/* write back */
# define _PAGE_WRITETHRU (2<<2)	/* write through */
#else
# define _PAGE_WRITEBACK (1<<2)	/* assume write through */
# define _PAGE_WRITETHRU (1<<2)
#endif
#define _PAGE_NOALLOC	(3<<2)	/* don't allocate cache, if not cached */
#define _CACHE_MASK	(3<<2)

#define _PAGE_USER	(1<<4)	/* user access (ring=1) */
#define _PAGE_KERNEL	(0<<4)	/* kernel access (ring=0) */

/* Software */
#define _PAGE_RW	(1<<6)	/* software: page writable */
#define _PAGE_DIRTY	(1<<7)	/* software: page dirty */
#define _PAGE_ACCESSED	(1<<8)	/* software: page accessed (read) */
#define _PAGE_FILE	(1<<9)	/* nonlinear file mapping */

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY)
#define _PAGE_PRESENT	(_PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED)
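
/*
 * Illustrative only (a sketch, not a kernel API): given the layout above,
 * the fields of a present PTE can be picked apart as
 *
 *	ppn  = pte_val(pte) & PAGE_MASK;	bits 31..12
 *	ring = (pte_val(pte) >> 4) & 3;		bits 5..4
 *	ca   = pte_val(pte) & 0xf;		bits 3..0
 *
 * Note that _CACHE_MASK selects only the cache-mode part of CA (bits 3..2);
 * bits 1..0 double as the hardware _PAGE_VALID and _PAGE_WRENABLE bits.
 */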

#ifdef CONFIG_MMU

# define PAGE_NONE	__pgprot(_PAGE_PRESENT)
# define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
# define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
# define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
# define PAGE_INVALID	__pgprot(_PAGE_USER)

# if (DCACHE_WAY_SIZE > PAGE_SIZE)
#  define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL)
# else
#  define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL)
# endif

#else /* no mmu */

# define PAGE_NONE	__pgprot(0)
# define PAGE_SHARED	__pgprot(0)
# define PAGE_COPY	__pgprot(0)
# define PAGE_READONLY	__pgprot(0)
# define PAGE_KERNEL	__pgprot(0)

#endif

/*
 * On certain configurations of Xtensa MMUs (e.g. the initial Linux config),
 * the MMU can't do page protection for execute, and considers that the same
 * as read. Also, write permissions may imply read permissions.
 * What follows is the closest we can get by reasonable means.
 * See linux/mm/mmap.c for the protection_map[] array that uses these
 * definitions.
 */
#define __P000	PAGE_NONE	/* private --- */
#define __P001	PAGE_READONLY	/* private --r */
#define __P010	PAGE_COPY	/* private -w- */
#define __P011	PAGE_COPY	/* private -wr */
#define __P100	PAGE_READONLY	/* private x-- */
#define __P101	PAGE_READONLY	/* private x-r */
#define __P110	PAGE_COPY	/* private xw- */
#define __P111	PAGE_COPY	/* private xwr */

#define __S000	PAGE_NONE	/* shared  --- */
#define __S001	PAGE_READONLY	/* shared  --r */
#define __S010	PAGE_SHARED	/* shared  -w- */
#define __S011	PAGE_SHARED	/* shared  -wr */
#define __S100	PAGE_READONLY	/* shared  x-- */
#define __S101	PAGE_READONLY	/* shared  x-r */
#define __S110	PAGE_SHARED	/* shared  xw- */
#define __S111	PAGE_SHARED	/* shared  xwr */
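
/*
 * Worked example (a sketch of the generic mmap behaviour, not code in this
 * header): a MAP_PRIVATE mapping created with PROT_READ | PROT_WRITE indexes
 * protection_map[] as "private -wr", i.e. __P011 == PAGE_COPY. The page is
 * therefore mapped without _PAGE_RW at first, and the first write faults so
 * the kernel can perform copy-on-write.
 */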

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];

/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_kernel(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * The following only work if pte_present() is true.
 */
#define pte_none(pte)	 (!(pte_val(pte) ^ _PAGE_USER))
#define pte_present(pte) (pte_val(pte) & _PAGE_VALID)
#define pte_clear(mm,addr,ptep) \
	do { update_pte(ptep, __pte(_PAGE_USER)); } while(0)

#define pmd_none(pmd)	 (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
#define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)

/* Note: we use the _PAGE_USER bit to indicate write-protected kernel memory */

static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; }
static inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
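
/*
 * For illustration (a hypothetical fault-handler fragment, not code from
 * this port): the software ref/mod bits are typically maintained with the
 * accessors above, e.g.
 *
 *	pte_t pte = *ptep;
 *	if (pte_write(pte))
 *		update_pte(ptep, pte_mkdirty(pte_mkyoung(pte)));
 *
 * update_pte() is defined further below.
 */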

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep));
#endif
}

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
	update_pte(ptep, pteval);
}


static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
#endif
}

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	update_pte(ptep, pte_mkold(pte));
	return 1;
}

static inline int
ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	update_pte(ptep, pte_mkclean(pte));
	return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;
	update_pte(ptep, pte_wrprotect(pte));
}

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* Find an entry in the third-level page table. */
#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
	((pte_t*) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir),(addr))

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)
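
/*
 * Illustrative walk (a sketch, not a definitive helper): looking up the PTE
 * for an address with the macros above, assuming the pmd is mapped
 * (pmd_none()/pmd_bad() checks omitted):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	an identity cast here,
 *						since the PMD is folded
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */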

/*
 * Encode and decode a swap entry.
 * Each PTE in a process VM's page table is either:
 *   "present" -- valid and not swapped out, protection bits are meaningful;
 *   "not present" -- which further subdivides in these two cases:
 *      "none" -- no mapping at all; identified by pte_none(), set by
 *                pte_clear();
 *      "swapped out" -- the page is swapped out, and the SWP macros below
 *                       are used to store swap file info in the PTE itself.
 *
 * In the Xtensa processor MMU, any PTE entries in user space (or anywhere
 * in virtual memory that can map differently across address spaces)
 * must have a correct ring value that represents the RASID field that
 * is changed when switching address spaces. E.g. such PTE entries cannot
 * be set to ring zero, because that can cause a (global) kernel ASID
 * entry to be created in the TLBs (even with invalid cache attribute),
 * potentially causing a multihit exception when going back to another
 * address space that mapped the same virtual address at another ring.
 *
 * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs.
 * We also avoid using the _PAGE_VALID bit, which must be zero for non-present
 * pages.
 *
 * We end up with the following available bits: 1..3 and 7..31.
 * We don't bother with 1..3 for now (we can use them later if needed),
 * and choose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
 * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
 * is currently implemented as an index into swap_info[MAX_SWAPFILES]
 * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
 * However, for some reason all other architectures in the 2.4 kernel
 * reserve either 6, 7, or 8 bits, so I'll not deviate from that for now. :)
 * SWP_OFFSET is an offset into the swap file in page-size units, so
 * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
 *
 * FIXME: 2 GB isn't very big. Other bits can be used to allow
 * larger swap sizes. In the meantime, it appears relatively easy to get
 * around the 2 GB limitation by simply using multiple swap files.
 */

#define __swp_type(entry)	(((entry).val >> 7) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 13)
#define __swp_entry(type,offs)	((swp_entry_t) {((type) << 7) | ((offs) << 13)})
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
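
/*
 * Worked example (illustrative values): __swp_entry(2, 0x100) yields
 * (2 << 7) | (0x100 << 13) == 0x200100. Decoding it back,
 * __swp_type() == (0x200100 >> 7) & 0x3f == 2 and
 * __swp_offset() == 0x200100 >> 13 == 0x100. Bits 0..6 stay zero, so
 * neither _PAGE_VALID nor the ring bits are set, as required above.
 */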

#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

#endif /* !defined (__ASSEMBLY__) */

#ifdef __ASSEMBLY__

/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
 * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
 *
 * Note: We require an additional temporary register which can be the same as
 * the register that holds the address.
 *
 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
 */
#define _PGD_INDEX(rt,rs)	extui	rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs)	extui	rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp)		l32i	mm, mm, MM_PGD;		\
					_PGD_INDEX(tmp, adr);		\
					addx4	mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp)	_PTE_INDEX(tmp, adr);		\
					srli	pmd, pmd, PAGE_SHIFT;	\
					slli	pmd, pmd, PAGE_SHIFT;	\
					addx4	pmd, tmp, pmd
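
/*
 * Illustrative use (a sketch, not taken from the kernel sources): with a2
 * holding the mm_struct pointer and a3 the faulting virtual address, the
 * PTE address ends up in a2 after
 *
 *	_PGD_OFFSET(a2, a3, a4)		a2 = &mm->pgd[pgd_index(a3)]
 *	l32i	a2, a2, 0		a2 = pmd value (pte-page address)
 *	_PTE_OFFSET(a2, a3, a4)		a2 = &pte_table[pte_index(a3)]
 */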

#else

extern void paging_init(void);

#define kern_addr_valid(addr)	(1)

extern void update_mmu_cache(struct vm_area_struct * vma,
			     unsigned long address, pte_t pte);

/*
 * remap a physical page range starting at `pfn' of size `size' with page
 * protection `prot' into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)


/* No page table caches to init */

#define pgtable_cache_init()	do { } while (0)

typedef pte_t *pte_addr_t;

#endif /* !defined (__ASSEMBLY__) */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */