/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#ifndef CONFIG_MMU

#define pgd_present(pgd)	(1) /* pages are always present on non-MMU */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *) 0)

#define PAGE_NONE		__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_SHARED		__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_COPY		__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_READONLY		__pgprot(0) /* these mean nothing to non-MMU */
#define PAGE_KERNEL		__pgprot(0) /* these mean nothing to non-MMU */

#define pgprot_noncached(x)	(x)

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ, off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef __ASSEMBLY__
static inline int pte_file(pte_t pte) { return 0; }
#endif /* __ASSEMBLY__ */

#define ZERO_PAGE(vaddr)	({ BUG(); NULL; })

#define swapper_pg_dir ((pgd_t *) NULL)

#define pgtable_cache_init()	do {} while (0)

#define arch_enter_lazy_cpu_mode()	do {} while (0)

#define pgprot_noncached_wc(prot)	prot

#else /* CONFIG_MMU */

#include <asm-generic/4level-fixup.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

#define FIRST_USER_ADDRESS	0

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/* Start and end of the vmalloc area. */
/* Make sure to map the vmalloc area above the pinned kernel memory area
   of 32 MB. */
#define VMALLOC_START	(CONFIG_KERNEL_START + \
				max(32 * 1024 * 1024UL, memory_size))
#define VMALLOC_END	ioremap_bot
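
/*
 * Worked example (illustrative figures, assuming the common
 * CONFIG_KERNEL_START of 0xc0000000): with 64 MB of RAM,
 * VMALLOC_START = 0xc0000000 + 0x04000000 = 0xc4000000; with only
 * 16 MB of RAM the 32 MB floor still applies, giving 0xc2000000.
 */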

#endif /* __ASSEMBLY__ */

/*
 * Macros to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
							_PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					_PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
			(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
							_PAGE_NO_CACHE))
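
/*
 * Illustrative sketch (not part of this header): a driver's mmap
 * handler would typically combine pgprot_noncached() with
 * io_remap_pfn_range() to map device registers uncached. The names
 * foo_mmap and FOO_PHYS_BASE below are hypothetical.
 */
#if 0
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start,
				  FOO_PHYS_BASE >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
#endif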

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4- or 8-entry,
 * fully-associative TLB which serves as a first level to the shared TLB.
 * These two TLBs are known as the UTLB and ITLB, respectively (see "mmu.h"
 * for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 */

/* PMD_SHIFT determines the size of the area mapped by the PTE pages */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	PMD_SHIFT
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
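
/*
 * Worked example, assuming the usual 4 kB pages (PAGE_SHIFT = 12) and
 * 4-byte PTEs (PTE_SHIFT = 10): PMD_SHIFT = PGDIR_SHIFT = 22, so one
 * pgd entry maps 4 MB, and PTRS_PER_PGD = PTRS_PER_PTE = 1024, i.e.
 * each level of the tree fits exactly in one page.
 */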

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
		__FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/* There are several potential gotchas here. The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 *   support down to 1k pages), this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
 *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits. Because 4xx doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits. We actually use bits 21, 24, 25, and
 *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_FILE	0x001	/* when !present: nonlinear file mapping */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_RW	0x040	/* software: Writes permitted */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_HWWRITE
#define _PAGE_HWWRITE	0
#endif
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose. -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
		__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)
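
/*
 * Worked example: numerically, _PAGE_BASE is 0x402 (PRESENT | ACCESSED)
 * and _PAGE_WRENABLE is 0x1c0 (RW | DIRTY | HWWRITE), so PAGE_KERNEL
 * comes out as 0x402 | 0x1c0 | 0x200 (HWEXEC) = 0x7c2.
 */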

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_X
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY_X
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_X
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED_X
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED_X

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
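
/*
 * Note (worked example): pte_pfn() and pfn_pte() are exact inverses
 * because all protection bits sit below PAGE_SHIFT, e.g.
 * pte_pfn(pfn_pte(0x1234, PAGE_KERNEL)) == 0x1234.
 */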

#ifndef __ASSEMBLY__
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_clear(xp)				do { } while (0)
#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) \
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) \
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte) \
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte) \
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte) \
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte) \
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) \
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) \
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) \
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) \
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
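
/*
 * Illustrative example: a write-fault path would typically compose
 * these helpers on a copied pte value, e.g.
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 * none of them touch the page table itself.
 */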

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot);				\
	pte;								\
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
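
/*
 * Example: for an mprotect()-style change,
 *	entry = pte_modify(entry, PAGE_READONLY);
 * keeps the page frame (the PAGE_MASK part) and the ACCESSED/DIRTY
 * software bits while replacing all other protection bits.
 */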

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long old, tmp, msr;

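	/*
	 * msrclr saves the old MSR in %2 and clears bit 0x2, the
	 * MicroBlaze interrupt-enable bit, so the load/andn/or/store
	 * sequence below cannot be interrupted; mts rmsr restores the
	 * saved MSR afterwards.
	 */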
	__asm__ __volatile__("\
			msrclr	%2, 0x2\n\
			nop\n\
			lw	%0, %4, r0\n\
			andn	%1, %0, %5\n\
			or	%1, %1, %6\n\
			sw	%1, %4, r0\n\
			mts	rmsr, %2\n\
			nop"
	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
	: "cc");

	return old;
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline int ptep_test_and_clear_young(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep, \
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/* Convert pmd entry to page */
/* our pmd entry is an effective address of pte table */
/* returns effective address of the pmd entry */
#define pmd_page_kernel(pmd)	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

/* returns struct page * of the pmd entry */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
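
/*
 * Illustrative sketch (assumes a kernel mapping that is already set
 * up): walking the folded two-level tree down to the pte for a kernel
 * virtual address. The name example_kernel_pte is hypothetical.
 */
#if 0
static pte_t *example_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pmd_t *pmd = pmd_offset(pgd, addr); /* folded: same slot as pgd */

	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
#endif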

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
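
/*
 * Worked example: a file offset of 5 pages encodes as
 * (5 << 3) | _PAGE_FILE = 0x29. _PAGE_PRESENT (0x002) stays clear,
 * _PAGE_FILE (0x001) distinguishes it from a swap entry, and bits
 * 3..31 carry the offset, hence PTE_FILE_MAX_BITS of 29.
 */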

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep);

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used). -- paulus
 */
#define __swp_type(entry)	((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
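
/*
 * Worked example: __swp_entry(2, 0x100) gives val = 2 | (0x100 << 6)
 * = 0x4002, and __swp_entry_to_pte() shifts this left by two, to
 * 0x10008: the swap type lands in pte bits 2..7 and the offset in
 * bits 8..31, keeping _PAGE_PRESENT (0x002) clear as required.
 */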

/* CONFIG_APUS */
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk(unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
/* extern unsigned long mm_ptov(unsigned long addr) \
	__attribute__ ((const)); TBD */

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode(unsigned long address, unsigned long size,
				unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range remap_page_range

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags);

void __init adjust_total_lowmem(void);
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* CONFIG_MMU */

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>

extern unsigned long ioremap_bot, ioremap_base;

void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle);
void consistent_free(void *vaddr);
void consistent_sync(void *vaddr, size_t size, int direction);
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction);

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */