/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
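
/* The TSBs are direct-mapped: each virtual address hashes to exactly
 * one entry.  tsb_hash() keeps the low-order bits of the virtual page
 * number (nentries is always a power of two), and tag_compare()
 * matches an entry's tag against bits 63:22 of the virtual address
 * being looked up.
 */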
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
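
/* Invalidate any kernel TSB entries covering [start, end).  An entry
 * is knocked out by storing a tag with TSB_TAG_INVALID_BIT set; the
 * memory barrier keeps that tag update ordered ahead of later loads
 * and stores.
 */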
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}
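
/* Flush the user TSB entries for every address in the mmu_gather
 * batch.  mm->context.lock is held to synchronize against tsb_grow()
 * swapping in a new TSB underneath us.  cheetah_plus and hypervisor
 * chips reference the TSB by physical address, so 'base' is physical
 * there and virtual otherwise; tsb_flush() is the assembler helper
 * that invalidates an entry only if its tag matches, honoring the TSB
 * lock bit used by the TLB miss handlers.
 */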
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;
	struct tsb *tsb;
	int i;

	spin_lock_irqsave(&mm->context.lock, flags);

	tsb = mm->context.tsb;
	nentries = mm->context.tsb_nentries;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;
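
		/* The low bit of each batched address is used as a flag
		 * by the TLB flush code; it is not part of the virtual
		 * address, so mask it off before hashing.
		 */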
		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}

	spin_unlock_irqrestore(&mm->context.lock, flags);
}
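
/* Recompute everything that depends on the TSB's size: the cached
 * entry count, the TSB register value loaded at context switch time,
 * the locked-TLB mapping used on chips that must access the TSB
 * virtually (cheetah_plus and hypervisor chips reference it physically
 * and skip the mapping), and the hypervisor TSB descriptor.
 */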
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
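		/* The D-cache is virtually indexed and larger than one
		 * page, so an 8K TSB must be mapped at a virtual address
		 * with the same cache color (physical address bit 13) as
		 * the TSB itself to avoid illegal aliases.
		 */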
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Set up the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * do_sparc64_fault() invokes this routine to try to grow the TSB.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size, flags;
	struct page *page;
	struct tsb *old_tsb, *new_tsb;
	unsigned long order, new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}
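
	/* This picks the smallest size whose 3/4-full capacity still
	 * exceeds the current RSS.  For example, with 16-byte TSB
	 * entries and rss == 1000: an 8K TSB holds 512 entries (384 at
	 * 3/4), 16K holds 1024 (768), and 32K holds 2048 (1536 > 1000),
	 * so a 32K TSB is chosen.
	 */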

	if (size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4;

retry_page_alloc:
	order = get_order(size);
	gfp_flags = GFP_KERNEL;
	if (order > 1)
		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;

	page = alloc_pages(gfp_flags, order);
	if (unlikely(!page)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb == NULL && order > 0) {
			size = PAGE_SIZE;
			new_rss_limit = ~0UL;
			goto retry_page_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb != NULL)
			mm->context.tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	new_tsb = page_address(page);
	memset(new_tsb, 0x40, size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB; this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB; page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(); this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		free_pages((unsigned long) new_tsb, get_order(size));
		return;
	}

	mm->context.tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, size);
	}

	mm->context.tsb = new_tsb;
	setup_tsb_params(mm, size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb. */
		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}
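
/* Runs once for each new address space.  sparc64_ctx_val starts out
 * zero, i.e. not CTX_VALID; a real MMU context number is assigned
 * lazily, the first time the address space is switched onto a cpu.
 */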
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, get_mm_rss(mm));

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);
	unsigned long flags;

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;
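
	/* Return this address space's MMU context number to the global
	 * allocation bitmap so it can be reused.
	 */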
	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}