/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

/* We use an 8K TSB for the whole kernel, which allows us to
 * handle about 4MB of modules and vmalloc mappings without
 * incurring many hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	8192
#define KERNEL_TSB_NENTRIES \
	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

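/* A TSB tag packs the context number into the high bits together
 * with bits 22 and up of the virtual address.  A context of ~0UL
 * acts as a wildcard that matches any entry, so callers can
 * invalidate entries without a valid hardware context.
 */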
static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
	if (context == ~0UL)
		return 1;

	return (entry->tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

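/* Walk the batch of pending virtual addresses gathered by the TLB
 * flush code and invalidate any matching entries in this mm's TSB.
 */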
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long ctx = ~0UL;
	unsigned long nentries = mm->context.tsb_nentries;
	int i;

	if (CTX_VALID(mm->context))
		ctx = CTX_HWBITS(mm->context);

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		struct tsb *ent;

		/* Mask off the low bit, which the TLB batching code may
		 * use as a flag, to recover the page-aligned address.
		 */
		v &= ~0x1UL;

		ent = &tsb[tsb_hash(v, nentries)];
		if (tag_compare(ent, v, ctx)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

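/* Compute the TSB register value, the virtual address at which the
 * TSB will be mapped, and the locked TTE used to map it, based on
 * the new TSB size.
 */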
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
	       _PAGE_CV | _PAGE_P | _PAGE_W);
	tsb_paddr = __pa(mm->context.tsb);

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		/* Colour the virtual mapping to match the physical
		 * address so the D-cache does not alias.
		 */
		base += (tsb_paddr & 8192);
#endif
		tte |= _PAGE_SZ8K;
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		tte |= _PAGE_SZ4MB;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}

	tsb_reg |= base;
	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
	tte |= (tsb_paddr & ~(page_sz - 1UL));

	mm->context.tsb_reg_val = tsb_reg;
	mm->context.tsb_map_vaddr = base;
	mm->context.tsb_map_pte = tte;
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v;
		unsigned int hash;

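		/* Load the tag and pte of this entry as one atomic
		 * 16-byte quad load via the nucleus quad-load ASI.
		 */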
		__asm__ __volatile__(
			"ldda [%2] %3, %0"
			: "=r" (tag), "=r" (pte)
			: "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD));

		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs into there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;

		hash = tsb_hash(v, new_nentries);
		new_tsb[hash].tag = tag;
		new_tsb[hash].pte = pte;
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try and grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

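	/* Pick the smallest TSB size whose 3/4-full capacity still
	 * exceeds the current RSS, capping at max_tsb_size.
	 */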
	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
	if (unlikely(!page))
		return;

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long initial_rss;

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is a fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	initial_rss = mm->context.tsb_nentries;
	if (initial_rss)
		initial_rss -= 1;

	tsb_grow(mm, initial_rss, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

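	/* Release this mm's hardware context number back to the
	 * allocation bitmap.
	 */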
	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}