/*
 * include/asm-xtensa/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#include <linux/stringify.h>
#include <linux/sched.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

#define XCHAL_MMU_ASID_BITS	8

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

extern unsigned long asid_cache;

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))

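/*
 * ASID_INSERT() builds the value loaded into the RASID special register:
 * byte 0 carries the kernel ASID (1), byte 1 receives this mm's user ASID,
 * and bytes 2 and 3 carry the reserved ASIDs 2 and 3.  For example,
 * ASID_INSERT(10) evaluates to 0x03020a01.
 *
 * The helpers below write and read the RASID register; the isync after the
 * write makes sure the update has taken effect before any subsequent
 * memory access.
 */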
static inline void set_rasid_register(unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register(void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}

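/*
 * asid_cache holds the most recently assigned ASID.  Its low
 * XCHAL_MMU_ASID_BITS are the ASID proper; the upper bits act as a
 * generation count.  When the low bits wrap around to zero, the whole
 * TLB is flushed and numbering restarts at ASID_USER_FIRST, so the
 * reserved values 0..3 are never handed out to user contexts.
 */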
static inline void
__get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	if (!(++asid_cache & ASID_MASK)) {
		flush_tlb_all(); /* start new asid cycle */
		asid_cache += ASID_USER_FIRST;
	}
	mm->context = asid_cache;
}

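/*
 * Make the given mm's ASID current and drop whatever page-directory
 * translation is still cached, so subsequent accesses use the new
 * context.
 */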
static inline void
__load_mmu_context(struct mm_struct *mm)
{
	set_rasid_register(ASID_INSERT(mm->context));
	invalidate_page_directory();
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	__get_new_mmu_context(next);
	__load_mmu_context(next);
}


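/*
 * A fresh ASID is needed either when the mm never had one (NO_CONTEXT)
 * or when the generation bits of its ASID (the bits above ASID_MASK)
 * differ from asid_cache, i.e. the ASID predates the last TLB flush and
 * may have been recycled.
 */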
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long asid = asid_cache;

	/* Check if our ASID is of an older version and thus invalid */

	if (next->context == NO_CONTEXT || ((next->context ^ asid) & ~ASID_MASK))
		__get_new_mmu_context(next);

	__load_mmu_context(next);
}

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */

}

#endif /* _XTENSA_MMU_CONTEXT_H */