/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#ifdef __KERNEL__

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>

/*
 * Fixed TLB translations in the processor.
 */

#define XCHAL_KSEG_CACHED_VADDR	0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR	0xd8000000
#define XCHAL_KSEG_PADDR	0x00000000
#define XCHAL_KSEG_SIZE		0x08000000
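
/*
 * Worked example (illustrative, not part of the original header): with the
 * fixed KSEG translation above, physical address 0x00001234 is visible to
 * the kernel at 0xd0001234 through the cached mapping and at 0xd8001234
 * through the bypass (uncached) mapping, since both windows cover the first
 * XCHAL_KSEG_SIZE (128 MB) of physical memory starting at XCHAL_KSEG_PADDR.
 */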

/*
 * PAGE_SHIFT determines the page size
 * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
 */

#define PAGE_SHIFT		12
#define PAGE_SIZE		(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

#define PAGE_OFFSET		XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN		XCHAL_KSEG_SIZE
#define PGTABLE_START		0x80000000
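
/*
 * Example (illustrative only): with PAGE_SHIFT == 12, PAGE_SIZE is 4 KB and
 * PAGE_MASK is 0xfffff000, so
 *
 *	PAGE_ALIGN(0xd0001234) == 0xd0002000
 *	PAGE_ALIGN(0xd0002000) == 0xd0002000	(already aligned)
 *
 * and PAGE_OFFSET, the start of the cached kernel mapping, is 0xd0000000.
 */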

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 *	|      |cache| cache index
 *	| pfn  |off |	virtual address
 *	|xxxx:X|zzz |
 *	|    : |    |
 *	|    \ /    |
 *	| trans.    |
 *	|    / \    |
 *	|yyyy:Y|zzz |	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when allocated or when pages
 * are remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
#endif

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif

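/*
 * Example (illustrative, assuming DCACHE_WAY_SIZE == 0x4000, i.e. a 16 KB
 * cache way with 4 KB pages): DCACHE_ALIAS_MASK is then 0x3000 and the page
 * color is taken from address bits 12..13, so
 *
 *	DCACHE_ALIAS(0xd0005000) == 1
 *	DCACHE_ALIAS(0xd0006000) == 2			(different color)
 *	DCACHE_ALIAS_EQ(0xd0005000, 0xd000d000)		(true, both color 1)
 *
 * Two mappings of the same physical page hit the same cache lines only if
 * their colors are equal.
 */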

#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
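
/*
 * Example (illustrative only): the wrappers force conversions to be explicit,
 * so mixing page-table values with plain integers is a compile-time error:
 *
 *	pte_t pte = __pte(0x12345000);
 *	unsigned long raw = pte_val(pte);	// 0x12345000
 *	unsigned long bad = pte;		// does not compile
 */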

/*
 * Pure 2^n version of get_order
 * Use the 'nsau' instruction if the processor supports it, otherwise fall
 * back to the generic version.
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/page.h>

#endif
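
/*
 * Example (illustrative only): 'nsau' returns the number of leading zero
 * bits of its operand (32 for a zero operand), so with PAGE_SHIFT == 12:
 *
 *	get_order(1)			-> nsau(0) == 32 -> order 0
 *	get_order(PAGE_SIZE)		-> nsau(0) == 32 -> order 0
 *	get_order(PAGE_SIZE + 1)	-> nsau(1) == 31 -> order 1
 *	get_order(8 * PAGE_SIZE)	-> nsau(7) == 29 -> order 3
 */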

struct page;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_user_page(void *, unsigned long, struct page *);
extern void copy_user_page(void *, void *, unsigned long, struct page *);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
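
/*
 * Sketch (an assumption about the implementation, not taken from this file):
 * with aliasing caches, clear_user_page() and copy_user_page() receive the
 * user virtual address so the kernel can touch the page through a temporary
 * mapping of matching color in the VMALLOC_END window mentioned above, i.e.
 * a kernel address chosen so that
 *
 *	DCACHE_ALIAS(temp_vaddr) == DCACHE_ALIAS(vaddr)
 *
 * which keeps the kernel's writes in the same cache lines that the user
 * mapping will later see.
 */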

/*
 * This handles the memory map. We handle pages at XCHAL_KSEG_CACHED_VADDR
 * for kernels with a 32-bit address space. These macros are for conversion
 * of kernel addresses, not user addresses.
 */

#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn)		((unsigned long)pfn < max_mapnr)
#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
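
/*
 * Worked example (illustrative only): with PAGE_OFFSET == 0xd0000000,
 *
 *	__pa(0xd0010000)	 == 0x00010000
 *	__va(0x00010000)	 == (void *)0xd0010000
 *	virt_to_page(0xd0010000) == pfn_to_page(0x10)
 *
 * These conversions are only valid for addresses in the cached KSEG mapping,
 * not for user or vmalloc addresses.
 */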

#define WANT_PAGE_VIRTUAL


#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */