#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
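/*
 * Illustrative example (not from the original source): for context 5 and a
 * region-1 address such as 0x2000000000000000 (top three bits 001),
 * ia64_rid(5, addr) == (5 << 3) | 1 == 41.  Each 2^61-byte region of an
 * address space thus gets its own region id derived from one context number.
 */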

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below.  Called by activate_mm().  <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		/*
		 * re-check with the lock held: wrap_mmu_context() sets
		 * this flag while holding the same lock.
		 */
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpus_clear(mm->cpu_vm_mask);
		if (ia64_ctx.next >= ia64_ctx.limit) {
			/*
			 * scan the bitmap for the next free range
			 * [next, limit) of context numbers:
			 */
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}
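/*
 * Illustrative example of the free-range scan above (not from the original
 * source): with bitmap bits 0 and 3 set and ia64_ctx.next == 1,
 * find_next_zero_bit() leaves next at 1 and find_next_bit() sets limit to 3,
 * so contexts 1 and 2 are handed out before the bitmap needs rescanning.
 */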

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
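	/*
	 * Illustrative example (not from the original source, and assuming
	 * 16KB pages, i.e. PAGE_SHIFT == 14): for context 5, rid == 0x28 and
	 * rr0 == (0x28 << 8) | (14 << 2) | 1 == 0x2839 -- region id in bits
	 * 8 and up, page size in bits 7:2, VHPT enable in bit 0.  rr1..rr4
	 * each bump the rid field by one, matching ia64_rid() for regions 1-4.
	 */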
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
#  error "reload_context assumes RGN_HPAGE is 4"
# endif
#endif

	ia64_set_rr(0x0000000000000000UL, rr0);
	ia64_set_rr(0x2000000000000000UL, rr1);
	ia64_set_rr(0x4000000000000000UL, rr2);
	ia64_set_rr(0x6000000000000000UL, rr3);
	ia64_set_rr(0x8000000000000000UL, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

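/*
 * Note (not in the original source): on ia64 switching to another mm is the
 * same operation as activating it for the first time, so switch_mm() simply
 * forwards to activate_mm() and ignores the previous mm and next task.
 */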
#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */