/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#include <linux/stringify.h>

#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Linux was ported to Xtensa assuming all auto-refill ways in set 0
 * had the same properties (a very likely assumption).  Multiple sets
 * of auto-refill ways will still work properly, but not as optimally
 * as the Xtensa designer may have assumed.
 *
 * We make this case a hard #error, killing the kernel build, to alert
 * the developer to this condition (which is more likely an error).
 * You super-duper clever developers can change it to a warning or
 * remove it altogether if you think you know what you're doing.  :)
 */

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
# error "MMU must have auto-refill ways"
#endif

#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
# error Linux may not use all auto-refill ways as efficiently as you think
#endif

#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
# error Only one page size allowed!
#endif

extern unsigned long asid_cache;
extern pgd_t *current_pgd;

/*
 * Define the number of entries per auto-refill way in set 0 of both I and D
 * TLBs.  We deal only with set 0 here (an assumption further explained in
 * assertions.h).  Also, define the total number of ARF entries in both TLBs.
 */

#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))

#define ITLB_ENTRIES							\
	(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
#define DTLB_ENTRIES							\
	(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
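
/*
 * Illustrative example (assumed numbers, not taken from any particular
 * core): a configuration whose set 0 has 4 auto-refill ways of 4 entries
 * each in both TLBs would give ITLB_ENTRIES == DTLB_ENTRIES == 4 * 4 == 16.
 */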


/*
 * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
 * In practice, they are probably equal.  This macro simplifies function
 * flush_tlb_range().
 */

#if (DTLB_ENTRIES < ITLB_ENTRIES)
# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
#else
# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
#endif


/*
 * asid_cache tracks only the ASID[USER_RING] field of the RASID special
 * register, which is the current user-task asid allocation value.
 * mm->context has the same meaning.  When it comes time to write the
 * asid_cache or mm->context values to the RASID special register, we first
 * shift the value left by 8, then insert the value.
 * ASID[0] always contains the kernel's asid value, and we reserve three
 * other asid values that we never assign to user tasks.
 */

#define ASID_INC	0x1
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
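
/*
 * For example (assuming a typical configuration with XCHAL_MMU_ASID_BITS
 * == 8), ASID_MASK is 0xff and each per-ring ASID field written to RASID
 * below is one byte wide.
 */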

/*
 * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
 * indicating invalid address space.  XCHAL_MMU_ASID_KERNEL is a configurable
 * Xtensa processor constant indicating the kernel address space.  They can
 * be arbitrary values.
 *
 * We identify three more unique, reserved ASID values to use in the unused
 * ring positions.  No other user process will be assigned these reserved
 * ASID values.
 *
 * For example, given that
 *
 *	XCHAL_MMU_ASID_INVALID == 0
 *	XCHAL_MMU_ASID_KERNEL  == 1
 *
 * the following maze of #if statements would generate
 *
 *	ASID_RESERVED_1        == 2
 *	ASID_RESERVED_2        == 3
 *	ASID_RESERVED_3        == 4
 *	ASID_FIRST_NONRESERVED == 5
 */

#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
# define ASID_RESERVED_1        ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
#else
# define ASID_RESERVED_1        ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
# define ASID_RESERVED_2        ((ASID_RESERVED_1 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_2        ((ASID_RESERVED_1 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
# define ASID_RESERVED_3        ((ASID_RESERVED_2 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_3        ((ASID_RESERVED_2 + 2) & ASID_MASK)
#endif

#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
#else
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
#endif

#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
			    ((ASID_RESERVED_2) << 16) + \
			    ((ASID_RESERVED_3) <<  8) + \
			    ((XCHAL_MMU_ASID_KERNEL))   )
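
/*
 * With the example values above (INVALID == 0, KERNEL == 1), this evaluates
 * to 0x02030401: one reserved ASID per unused ring, kernel ASID in the low
 * byte.
 */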


/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  NO_CONTEXT is a better mnemonic than
 * XCHAL_MMU_ASID_INVALID, so we use it in code instead.
 */

#define NO_CONTEXT   XCHAL_MMU_ASID_INVALID

#if (KERNEL_RING != 0)
# error The KERNEL_RING really should be zero.
#endif

#if (USER_RING >= XCHAL_MMU_RINGS)
# error USER_RING cannot be greater than the highest numbered ring.
#endif

#if (USER_RING == KERNEL_RING)
# error The user and kernel rings really should not be equal.
#endif

#if (USER_RING == 1)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
			 ((ASID_RESERVED_2)   << 16) + \
			 (((x) & (ASID_MASK)) <<  8) + \
			 ((XCHAL_MMU_ASID_KERNEL))   )

#elif (USER_RING == 2)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1)   << 24) + \
			 (((x) & (ASID_MASK)) << 16) + \
			 ((ASID_RESERVED_2)   <<  8) + \
			 ((XCHAL_MMU_ASID_KERNEL))   )

#elif (USER_RING == 3)
#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
			 ((ASID_RESERVED_1)   << 16) + \
			 ((ASID_RESERVED_2)   <<  8) + \
			 ((XCHAL_MMU_ASID_KERNEL))   )

#else
#error Goofy value for USER_RING

#endif /* USER_RING == 1 */
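
/*
 * For instance, in the common USER_RING == 1 case with the example values
 * above, ASID_INSERT(x) evaluates to 0x0203xx01, where xx is the low byte
 * of x: the user ASID sits in bits 15..8, which is the "shift left by 8"
 * mentioned in the asid_cache comment.  (Illustrative values only.)
 */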


/*
 * All upper bits not used by the hardware ASID are treated as a software
 * ASID extension (a generation/version number).
 */

#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION						\
	((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
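
/*
 * Example (again assuming 8 ASID bits): ASID_VERSION_MASK == 0xffffff00
 * and ASID_FIRST_VERSION == 0x105, i.e. version 1 with the first
 * non-reserved ASID (5) in the low byte.
 */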

static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}


#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))

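/*
 * Fast-path ASID allocation: with INVALID == 0 and KERNEL == 1, every value
 * whose low ASID bits lie outside 0..ASID_FIRST_NONRESERVED-1 is a valid
 * user ASID, so a plain increment suffices.  Illustrative trace (assuming
 * 8 ASID bits): asid_cache == 0x1ff increments to 0x200, the low byte wraps
 * to 0, so we flush the TLB and continue at 0x205, i.e. a new generation
 * starting at ASID_FIRST_NONRESERVED.
 */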
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	if (! ((asid += ASID_INC) & ASID_MASK) ) {
		flush_tlb_all(); /* start new asid cycle */
		if (!asid)      /* fix version if needed */
			asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
		asid += ASID_FIRST_NONRESERVED;
	}
	mm->context = asid_cache = asid;
}

#else
#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation

/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL == 1 are
   really the best, but if you insist... */

static inline int validate_asid (unsigned long asid)
{
	switch (asid) {
	case XCHAL_MMU_ASID_INVALID:
	case XCHAL_MMU_ASID_KERNEL:
	case ASID_RESERVED_1:
	case ASID_RESERVED_2:
	case ASID_RESERVED_3:
		return 0; /* can't use these values as ASIDs */
	}
	return 1; /* valid */
}

static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
	extern void flush_tlb_all(void);
	while (1) {
		asid += ASID_INC;
		if ( ! (asid & ASID_MASK) ) {
			flush_tlb_all(); /* start new asid cycle */
			if (!asid)      /* fix version if needed */
				asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
			asid += ASID_FIRST_NONRESERVED;
			break; /* no need to validate here */
		}
		if (validate_asid (asid & ASID_MASK))
			break;
	}
	mm->context = asid_cache = asid;
}

#endif


/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

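/*
 * Switch to the address space of 'next'.  If 'next' still holds an ASID from
 * an older generation (its upper "version" bits differ from asid_cache), it
 * is given a fresh ASID first; e.g. a task that last ran during generation
 * 0x100 is re-tagged after the cache has rolled over to 0x200.  (Example
 * values are illustrative, assuming 8 ASID bits.)
 */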
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if ((next->context ^ asid) & ASID_VERSION_MASK)
		get_new_mmu_context(next, asid);

	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}

#define deactivate_mm(tsk, mm)	do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	get_new_mmu_context(next, asid_cache);
	set_rasid_register (ASID_INSERT(next->context));
	invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}

#endif /* _XTENSA_MMU_CONTEXT_H */