/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/stringify.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Linux was ported to Xtensa assuming all auto-refill ways in set 0
 * had the same properties (a very likely assumption). Multiple sets
 * of auto-refill ways will still work properly, but not as optimally
 * as the Xtensa designer may have assumed.
 *
 * We make this case a hard #error, killing the kernel build, to alert
 * the developer to this condition (which is more likely an error).
 * You super-duper clever developers can change it to a warning or
 * remove it altogether if you think you know what you're doing. :)
 */

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
# error "MMU must have auto-refill ways"
#endif

#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
# error Linux may not use all auto-refill ways as efficiently as you think
#endif

#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
# error Only one page size allowed!
#endif

extern unsigned long asid_cache;
extern pgd_t *current_pgd;

/*
 * Define the number of entries per auto-refill way in set 0 of both I and D
 * TLBs. We deal only with set 0 here (an assumption further explained in
 * assertions.h). Also, define the total number of ARF entries in both TLBs.
 */

#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))

#define ITLB_ENTRIES \
	(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
#define DTLB_ENTRIES \
	(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))


/*
 * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
 * In practice, they are probably equal. This macro simplifies function
 * flush_tlb_range().
 */

#if (DTLB_ENTRIES < ITLB_ENTRIES)
# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
#else
# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
#endif
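
/*
 * A minimal sketch (a hypothetical helper, not part of this port) of
 * how SMALLEST_NTLB_ENTRIES simplifies flush_tlb_range(): once a range
 * spans more pages than the smaller TLB can hold, invalidating entries
 * one page at a time costs more than a full flush.
 */
static inline int flush_range_exceeds_tlb (unsigned long start,
					   unsigned long end)
{
	return ((end - start) >> PAGE_SHIFT) > SMALLEST_NTLB_ENTRIES;
}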


/*
 * asid_cache tracks only the ASID[USER_RING] field of the RASID special
 * register, which is the current user-task asid allocation value.
 * mm->context has the same meaning. When it comes time to write the
 * asid_cache or mm->context values to the RASID special register, we first
 * shift the value left by 8, then insert the value.
 * ASID[0] always contains the kernel's asid value, and we reserve three
 * other asid values that we never assign to user tasks.
 */

#define ASID_INC	0x1
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)

/*
 * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
 * indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable
 * Xtensa processor constant indicating the kernel address space. They can
 * be arbitrary values.
 *
 * We identify three more unique, reserved ASID values to use in the unused
 * ring positions. No other user process will be assigned these reserved
 * ASID values.
 *
 * For example, given that
 *
 *	XCHAL_MMU_ASID_INVALID == 0
 *	XCHAL_MMU_ASID_KERNEL  == 1
 *
 * the following maze of #if statements would generate
 *
 *	ASID_RESERVED_1        == 2
 *	ASID_RESERVED_2        == 3
 *	ASID_RESERVED_3        == 4
 *	ASID_FIRST_NONRESERVED == 5
 */

#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
# define ASID_RESERVED_1	((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
#else
# define ASID_RESERVED_1	((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
# define ASID_RESERVED_2	((ASID_RESERVED_1 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_2	((ASID_RESERVED_1 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
# define ASID_RESERVED_3	((ASID_RESERVED_2 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_3	((ASID_RESERVED_2 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
# define ASID_FIRST_NONRESERVED	((ASID_RESERVED_3 + 1) & ASID_MASK)
#else
# define ASID_FIRST_NONRESERVED	((ASID_RESERVED_3 + 2) & ASID_MASK)
#endif

#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
			    ((ASID_RESERVED_2) << 16) + \
			    ((ASID_RESERVED_3) <<  8) + \
			    ((XCHAL_MMU_ASID_KERNEL)) )
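
/*
 * Continuing the worked example above (XCHAL_MMU_ASID_INVALID == 0,
 * XCHAL_MMU_ASID_KERNEL == 1), ASID_ALL_RESERVED expands to
 * (2 << 24) + (3 << 16) + (4 << 8) + 1 == 0x02030401: one reserved
 * ASID per ring, with the kernel's ASID in ring 0.
 */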


/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context. NO_CONTEXT is a better mnemonic than
 * XCHAL_MMU_ASID_INVALID, so we use it in code instead.
 */

#define NO_CONTEXT	XCHAL_MMU_ASID_INVALID

#if (KERNEL_RING != 0)
# error The KERNEL_RING really should be zero.
#endif

#if (USER_RING >= XCHAL_MMU_RINGS)
# error USER_RING cannot be greater than the highest numbered ring.
#endif

#if (USER_RING == KERNEL_RING)
# error The user and kernel rings really should not be equal.
#endif

#if (USER_RING == 1)
#define ASID_INSERT(x)	( ((ASID_RESERVED_1)   << 24) + \
			  ((ASID_RESERVED_2)   << 16) + \
			  (((x) & (ASID_MASK)) <<  8) + \
			  ((XCHAL_MMU_ASID_KERNEL)) )

#elif (USER_RING == 2)
#define ASID_INSERT(x)	( ((ASID_RESERVED_1)   << 24) + \
			  (((x) & (ASID_MASK)) << 16) + \
			  ((ASID_RESERVED_2)   <<  8) + \
			  ((XCHAL_MMU_ASID_KERNEL)) )

#elif (USER_RING == 3)
#define ASID_INSERT(x)	( (((x) & (ASID_MASK)) << 24) + \
			  ((ASID_RESERVED_1)   << 16) + \
			  ((ASID_RESERVED_2)   <<  8) + \
			  ((XCHAL_MMU_ASID_KERNEL)) )

#else
#error Goofy value for USER_RING

#endif /* USER_RING == 1 */
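
/*
 * Worked example, reusing the values from the example above
 * (USER_RING == 1, ASID_RESERVED_1 == 2, ASID_RESERVED_2 == 3,
 * kernel ASID == 1):
 *
 *	ASID_INSERT(0x5a) == (2 << 24) + (3 << 16) + (0x5a << 8) + 1
 *	                  == 0x02035a01
 *
 * Ring N occupies byte N of the result, so ring 0 always carries the
 * kernel ASID and the user ASID lands in the USER_RING byte.
 */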


/*
 * All upper bits unused by the hardware are treated as a software
 * asid extension.
 */

#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION \
	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
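
/*
 * For instance, assuming XCHAL_MMU_ASID_BITS == 8 (the width is a
 * processor configuration option): ASID_MASK == 0xff,
 * ASID_VERSION_MASK == 0xffffff00, and the software version advances
 * by 0x100 each time the hardware ASID field wraps around.
 */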

static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}
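
/*
 * A hypothetical helper (illustration only, not used by this port):
 * since ASID_INSERT() places ring N's ASID in byte N of RASID, the
 * current user-task ASID can be read back out like this.
 */
static inline unsigned long get_user_asid (void)
{
	return (get_rasid_register() >> (USER_RING * 8)) & ASID_MASK;
}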


#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	if (! ((asid += ASID_INC) & ASID_MASK) ) {
		flush_tlb_all(); /* start new asid cycle */
		if (!asid) /* fix version if needed */
			asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
		asid += ASID_FIRST_NONRESERVED;
	}
	mm->context = asid_cache = asid;
}
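
/*
 * Example of the wrap above (assuming 8 ASID bits and
 * ASID_FIRST_NONRESERVED == 5): asid == 0x1ff increments to 0x200,
 * the hardware ASID field is now zero, so the whole TLB is flushed
 * and the new cycle starts at 0x205 (version 2, first non-reserved
 * ASID).
 */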

#else
#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation

/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL == 1 are
   really the best, but if you insist... */

static inline int validate_asid (unsigned long asid)
{
	switch (asid) {
	case XCHAL_MMU_ASID_INVALID:
	case XCHAL_MMU_ASID_KERNEL:
	case ASID_RESERVED_1:
	case ASID_RESERVED_2:
	case ASID_RESERVED_3:
		return 0; /* can't use these values as ASIDs */
	}
	return 1; /* valid */
}

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	while (1) {
		asid += ASID_INC;
		if ( ! (asid & ASID_MASK) ) {
			flush_tlb_all(); /* start new asid cycle */
			if (!asid) /* fix version if needed */
				asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
			asid += ASID_FIRST_NONRESERVED;
			break; /* no need to validate here */
		}
		if (validate_asid (asid & ASID_MASK))
			break;
	}
	mm->context = asid_cache = asid;
}

#endif


/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if ((next->context ^ asid) & ASID_VERSION_MASK)
		get_new_mmu_context(next, asid);

	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}
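
/*
 * Example of the version check in switch_mm() above (assuming 8 ASID
 * bits): with asid_cache == 0x205 (version 2, ASID 5) and
 * next->context == 0x105 (version 1), the XOR has bits set within
 * ASID_VERSION_MASK, so next's ASID predates the last TLB flush and
 * must be reallocated before RASID is written.
 */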

#define deactivate_mm(tsk, mm)	do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	get_new_mmu_context(next, asid_cache);
	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* _XTENSA_MMU_CONTEXT_H */