#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H

#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>

/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
 * (virtual segment identifiers) for each context.  Although the
 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
 * we only use 32,768 of them.  That is ample, since there can be
 * at most around 30,000 tasks in the system anyway, and it means
 * that we can use a bitmap to indicate which contexts are in use.
 * Using a bitmap means that we entirely avoid all of the problems
 * that we used to have when the context number overflowed,
 * particularly on SMP systems.
 *	-- paulus.
 */

/*
 * This function defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * function is changed then arch/ppc/mm/hashtable.S will have to be
 * changed to correspond.
 */
#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

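/*
 * Worked example (illustrative only, not used anywhere in the code): for
 * context 1 and effective address 0x10000000, the ESID is 1, so
 * CTX_TO_VSID(1, 0x10000000) = (1 * 14352 + 1 * 0x111) & 0xffffff = 0x3921.
 * The skew constants place consecutive contexts 0x3810 (= 897 * 16) apart
 * and consecutive segments within one context 0x111 apart in VSID space,
 * which scatters hash-table entries rather than clustering them.
 */
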
/*
 * The MPC8xx has only 16 contexts.  We rotate through them on each
 * task switch.  A better way would be to keep track of tasks that
 * own contexts, and implement LRU replacement.  That way very active
 * tasks don't always have to pay the TLB reload overhead.  The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry.  Shared does not mean they
 * are not protected; it just means that the ASID comparison is not
 * performed.
 *	-- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 *	-- Dan
 */

/* Nothing to do here on 32-bit PPC; this hook exists for
   architectures that need setup when entering lazy TLB mode. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifdef CONFIG_8xx
#define NO_CONTEXT      	16
#define LAST_CONTEXT    	15
#define FIRST_CONTEXT    	0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT      	256
#define LAST_CONTEXT    	255
#define FIRST_CONTEXT    	1

#else

/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT      	((unsigned long) -1)
#define LAST_CONTEXT    	32767
#define FIRST_CONTEXT    	1
#endif

/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron BDI2000 hardware debugger).
 */
extern void set_context(unsigned long contextid, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is only an optimization: we can't rely on this context
 * number being free, but it usually will be.
 */
extern unsigned long next_mmu_context;

/*
 * If we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
#if LAST_CONTEXT < 30000
#define FEW_CONTEXTS	1
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
#endif

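/*
 * steal_context() itself lives in arch/ppc/mm/mmu_context.c; roughly, it
 * evicts the mm recorded in context_mm[] for the next context number,
 * flushing that mm's TLB entries so the context can be reused by the
 * caller of get_mmu_context() below.
 */
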
/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx;

	if (mm->context.id != NO_CONTEXT)
		return;
#ifdef FEW_CONTEXTS
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
#endif
	/* Start from the cached hint and claim the first free bit,
	   wrapping around to 0 if we run off the end of the map. */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	/* The mask works because LAST_CONTEXT is 2^n - 1 in every
	   configuration above. */
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context.id = ctx;
#ifdef FEW_CONTEXTS
	context_mm[ctx] = mm;
#endif
}

/*
 * Set up the context for a new address space.
 */
static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = NO_CONTEXT;
	mm->context.vdso_base = 0;
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	preempt_disable();
	if (mm->context.id != NO_CONTEXT) {
		clear_bit(mm->context.id, context_map);
		mm->context.id = NO_CONTEXT;
#ifdef FEW_CONTEXTS
		atomic_inc(&nr_free_contexts);
#endif
	}
	preempt_enable();
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
	/* Stop all outstanding AltiVec data-stream prefetches before
	   switching address spaces. */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall;\n"
#ifndef CONFIG_POWER4
	     "sync;\n"	/* G4 needs a sync here, G5 apparently not */
#endif
	     : : );
#endif /* CONFIG_ALTIVEC */

	tsk->thread.pgdir = next->pgd;

	/* No need to flush userspace segments if the mm doesn't change */
	if (prev == next)
		return;

	/* Setup new userspace context */
	get_mmu_context(next);
	set_context(next->context.id, next->pgd);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm(active_mm, mm)	switch_mm(active_mm, mm, current)

extern void mmu_context_init(void);

#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */