/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

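/* The TSB (Translation Storage Buffer) is a software-managed,
 * direct-mapped cache of TLB translations which the cpu consults
 * on a TLB miss before trapping to software.  The hash is simply
 * the virtual page number modulo the number of entries.
 */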
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

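/* A TSB tag stores bits 22 and upwards of the virtual address
 * (the bits below that are implied by the entry's index), so
 * comparing against a vaddr is just a shift and compare.
 */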
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

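/* Kernel TSB entries are flushed simply by setting the invalid bit
 * in the tag; the memory barrier makes the store visible before any
 * subsequent TLB flush issued by the caller.
 */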
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

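/* User TSBs are flushed via tsb_flush(), which takes either a
 * physical address (cheetah+ and the sun4v hypervisor access the
 * TSB physically) or a kernel virtual address on older chips.
 */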
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long nentries = mm->context.tsb_nentries;
	unsigned long base;
	int i;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

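		/* The TLB batching code uses the low bit of the vaddr
		 * as a flag; it is not part of the address itself, so
		 * mask it away before hashing.
		 */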
		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

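/* Compute the TSB register value and (for pre-cheetah+ chips) the
 * virtual mapping used to access the TSB, then record everything
 * in the mm's context.
 */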
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
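		/* An 8K TSB occupies a single D-cache colour; offset
		 * the virtual base so its colour matches the physical
		 * address and the mapping cannot alias.
		 */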
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
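		/* The locked TLB entry maps the page_sz-aligned region
		 * containing the TSB; any offset of the TSB within that
		 * region goes into the TSB register's virtual base.
		 */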
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
		hp->assoc = 1;
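		/* Each TSB entry is 16 bytes (an 8 byte tag plus an
		 * 8 byte TTE), hence the divide by 16 here.
		 */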
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
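/* Each entry must be re-hashed because the hash index depends upon
 * the table size.  Tag and PTE are fetched with a single quad load
 * so we never observe a half-updated entry.
 */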
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
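		/* ldda with a quad ASI fills an even/odd register
		 * pair, so pin tag and pte to %o4 and %o5.
		 */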
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v, hash;

		if (tlb_type == hypervisor) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS_4V));
		} else if (tlb_type == cheetah_plus) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS));
		} else {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (&old_tsb[i]),
				  "i" (ASI_NUCLEUS_QUAD_LDD));
		}

		if (tag & ((1UL << TSB_TAG_LOCK_BIT) |
			   (1UL << TSB_TAG_INVALID_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs into there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;

		hash = tsb_hash(v, new_nentries);
		if (tlb_type == cheetah_plus ||
		    tlb_type == hypervisor) {
			__asm__ __volatile__(
				"stxa %0, [%1] %2\n\t"
				"stxa %3, [%4] %2"
				: /* no outputs */
				: "r" (tag),
				  "r" (__pa(&new_tsb[hash].tag)),
				  "i" (ASI_PHYS_USE_EC),
				  "r" (pte),
				  "r" (__pa(&new_tsb[hash].pte)));
		} else {
			new_tsb[hash].tag = tag;
			new_tsb[hash].pte = pte;
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try to grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

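	/* The page allocator cannot hand out contiguous blocks larger
	 * than MAX_ORDER pages, so clamp the maximum TSB size to that.
	 */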
	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags, get_order(size));
	if (unlikely(!page))
		return;

	/* Mark all tags as invalid. */
	memset(page_address(page), 0x40, size);
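	/* (Filling with 0x40 bytes sets bit 46, TSB_TAG_INVALID_BIT,
	 *  in every 8-byte tag word.)
	 */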

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;
	tsb_grow(mm, 0, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
	unsigned long flags;

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock_irqsave(&ctx_alloc_lock, flags);

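	/* Release the context ID back to the global allocator; the
	 * bitmap packs 64 context numbers per word.
	 */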
	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}