/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

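/* Hash a virtual address into a TSB index.  The TSB is direct-mapped
 * and indexed by virtual page number, so nentries must be a power of
 * two for the mask below to work.
 */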
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

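/* A TSB tag packs the hardware context into the upper bits and the
 * high bits of the virtual address into the lower bits:
 *
 *	tag[63:48]	context
 *	tag[41:0]	vaddr[63:22]
 *
 * vaddr bits 21:13 are implied by the entry's index in the TSB, and
 * bits 12:0 fall inside the base page (see copy_tsb() below).
 */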
static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
{
	return (tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

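/* Probe the kernel TSB for each page in [start, end) and clear the
 * tag of any entry that matches, forcing a refill on the next access.
 */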
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

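/* Flush each virtual address collected in the mmu_gather out of this
 * address space's TSB: tsb_flush() clears the tag of the indexed
 * entry if it matches.
 */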
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long nentries = mm->context.tsb_nentries;
	unsigned long ctx, base;
	int i;

	if (unlikely(!CTX_VALID(mm->context)))
		return;

	ctx = CTX_HWBITS(mm->context);

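	/* Newer chips reference the TSB by physical address; see the
	 * matching logic in setup_tsb_params().
	 */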
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL) | (ctx << 48UL);

		tsb_flush(ent, tag);
	}
}

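/* Compute the TSB register value plus the virtual address and TTE
 * used to map the TSB, and record them in the mm's context.  On
 * hypervisor platforms the TSB descriptor handed to the firmware is
 * filled in as well.
 */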
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
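	/* For example, a 64K TSB (8192 << 3) fits in a single 64K
	 * TLB entry, while the maximum 1MB TSB (8192 << 7) needs a
	 * 4MB page.
	 */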
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Set up the hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

/* Carry every valid entry in the old TSB over into the new one,
 * rehashing each virtual address for the new size.  The page tables
 * are locked against modifications while this runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v, hash;

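		/* Read tag and pte together with one atomic quad
		 * load, going through the physical address of the
		 * entry on chips that reference the TSB physically.
		 */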
		if (tlb_type == hypervisor) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS_4V));
		} else if (tlb_type == cheetah_plus) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS));
		} else {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (&old_tsb[i]),
				  "i" (ASI_NUCLEUS_QUAD_LDD));
		}

		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs in there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;

		hash = tsb_hash(v, new_nentries);
		if (tlb_type == cheetah_plus ||
		    tlb_type == hypervisor) {
			__asm__ __volatile__(
				"stxa %0, [%1] %2\n\t"
				"stxa %3, [%4] %2"
				: /* no outputs */
				: "r" (tag),
				  "r" (__pa(&new_tsb[hash].tag)),
				  "i" (ASI_PHYS_USE_EC),
				  "r" (pte),
				  "r" (__pa(&new_tsb[hash].pte)));
		} else {
			new_tsb[hash].tag = tag;
			new_tsb[hash].pte = pte;
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try and grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
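/* For example, the minimum 8K TSB holds 512 16-byte entries, so the
 * first grow is triggered once the RSS exceeds 384 pages.
 */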
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

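	/* Pick the smallest TSB size whose 3/4-full threshold still
	 * exceeds the current RSS.
	 */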
	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
	if (unlikely(!page))
		return;

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

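/* Initialize the MMU context of a new address space: no hardware
 * context is assigned yet, and an initial minimal-size TSB is
 * allocated via tsb_grow().
 */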
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;
	tsb_grow(mm, 0, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

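/* Release the TSB pages and, if a hardware context was ever
 * allocated, return its number to the allocation bitmap.
 */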
void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}