/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}
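
/* Illustrative example (hypothetical values, not taken from the code):
 * with 8K base pages (hash_shift == PAGE_SHIFT == 13) and a 512-entry
 * TSB, tsb_hash(0x402000, 13, 512) == (0x201 & 0x1ff) == 1, so vaddr
 * 0x402000 selects TSB entry 1.
 */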

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}
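
/* A TSB tag stores bits 63:22 of the virtual address; tag_compare()
 * above reports a hit only when all of those bits match.
 */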

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v))
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
	}
}
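
/* Note: the kernel TSB is a statically allocated, virtually addressed
 * array, so a matching entry can be invalidated above with a plain
 * store of the invalid-tag bit, rather than the tsb_flush() sequence
 * the user-TSB paths below go through.
 */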

static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		/* Bit 0 is a side-band flag set by the TLB batching
		 * code, not part of the address; mask it off.
		 */
		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}
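
/* On cheetah_plus and hypervisor (sun4v) chips the TSB is referenced
 * by physical address with no locked TLB entry backing it, hence the
 * __pa() conversions above; older cpus go through the virtual mapping
 * set up via setup_tsb_params().
 */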

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif
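
/* The selections above feed the hypervisor TSB descriptor fields
 * (pgsz_idx/pgsz_mask) filled in by setup_tsb_params() below,
 * describing the page sizes associated with each TSB.
 */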

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
		       current->comm, current->pid, tsb_bytes);
		do_exit(SIGSEGV);
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}
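
/* Illustrative sizing (derived from the switch above): a 64KB TSB
 * (8192 << 3) holds 65536 / 16 == 4096 of the 16-byte tag/TTE pairs
 * (cf. num_ttes), sets size field 0x3 in tsb_reg, and fits in a
 * single 64K mapping.
 */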

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};
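
/* Cache index i serves TSBs of 8192 << i bytes; the index is encoded
 * in (and later recovered from) the low three bits of tsb_reg_val,
 * see tsb_grow() and tsb_destroy_one().
 */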

void __init pgtable_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}
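
/* Passing size as the alignment argument to kmem_cache_create() is
 * what guarantees each TSB is naturally aligned to its own size, a
 * requirement checked by the BUG_ON() in setup_tsb_params().
 */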

int sysctl_tsb_ratio = -2;

static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
{
	unsigned long num_ents = (new_size / sizeof(struct tsb));

	if (sysctl_tsb_ratio < 0)
		return num_ents - (num_ents >> -sysctl_tsb_ratio);
	else
		return num_ents + (num_ents >> sysctl_tsb_ratio);
}
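
/* Worked example: with the default sysctl_tsb_ratio of -2, an 8K TSB
 * (512 entries) gets a limit of 512 - (512 >> 2) == 384 entries, i.e.
 * growth triggers at 3/4 of capacity as described below.
 */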

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two. The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned. It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once. Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		new_rss_limit = tsb_size_to_rss_limit(new_size);
		if (new_rss_limit > rss)
			break;
		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
					gfp_flags, numa_node_id());
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior. Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes. If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB, this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling. This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed. If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(), this is
	 * also true for the case where vmscan is modifying the page
	 * tables. The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		preempt_disable();
		smp_tsb_sync(mm);
		preempt_enable();

		/* Now it is safe to free the old tsb. */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
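
/* Typical call (cf. do_sparc64_fault() and init_new_context() below):
 *
 *	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 *
 * which is a no-op if another thread has already grown this TSB past
 * the current RSS.
 */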

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is a fork, inherit the parent's TSB size. We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}
488
David S. Miller09f94282006-01-31 18:31:06 -0800489void destroy_context(struct mm_struct *mm)
490{
David S. Millerdcc1e8d2006-03-22 00:49:59 -0800491 unsigned long flags, i;
David S. Millerbd407912006-01-31 18:31:38 -0800492
David S. Millerdcc1e8d2006-03-22 00:49:59 -0800493 for (i = 0; i < MM_NUM_TSBS; i++)
494 tsb_destroy_one(&mm->context.tsb_block[i]);
David S. Miller09f94282006-01-31 18:31:06 -0800495
David S. Miller77b838f2006-02-23 21:40:15 -0800496 spin_lock_irqsave(&ctx_alloc_lock, flags);
David S. Miller09f94282006-01-31 18:31:06 -0800497
498 if (CTX_VALID(mm->context)) {
499 unsigned long nr = CTX_NRBITS(mm->context);
500 mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
501 }
502
David S. Miller77b838f2006-02-23 21:40:15 -0800503 spin_unlock_irqrestore(&ctx_alloc_lock, flags);
David S. Miller09f94282006-01-31 18:31:06 -0800504}