/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

/* We use an 8K TSB for the whole kernel, which allows us to
 * handle about 4MB of modules and vmalloc mappings without
 * incurring many hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	8192
#define KERNEL_TSB_NENTRIES \
	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

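/* Hash a virtual address into a TSB index by dropping the in-page
 * offset and masking with the table size (nentries is a power of two).
 */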
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

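/* Compare against the stored tag: VA bits above bit 22 in the low
 * bits and the context number shifted into bits 48 and up.  A context
 * of ~0UL acts as a wildcard and matches every entry.
 */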
static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
	if (context == ~0UL)
		return 1;

	return (entry->tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

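/* Remove any swapper_tsb entries that map pages in [start, end). */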
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

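/* Invalidate the matching TSB entry for each virtual address batched
 * in the mmu_gather.  Bit zero of a batched address may carry a flag,
 * so it is masked off before hashing.
 */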
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long ctx = ~0UL;
	unsigned long nentries = mm->context.tsb_nentries;
	int i;

	if (CTX_VALID(mm->context))
		ctx = CTX_HWBITS(mm->context);

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		struct tsb *ent;

		v &= ~0x1UL;

		ent = &tsb[tsb_hash(v, nentries)];
		if (tag_compare(ent, v, ctx)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

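/* Compute the TSB register value and the locked TTE used to map a
 * tsb_bytes-sized TSB at TSBMAP_BASE, and record both in the mm's
 * context structure.
 */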
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
	       _PAGE_CV | _PAGE_P | _PAGE_W);
	tsb_paddr = __pa(mm->context.tsb);

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		tte |= _PAGE_SZ8K;
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		tte |= _PAGE_SZ4MB;
		page_sz = 4 * 1024 * 1024;
		break;
	}

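	/* The register value combines the size field, the mapping virtual
	 * address, and the TSB's offset within the mapping page; the TTE
	 * maps the page_sz-aligned physical region containing the TSB.
	 */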
	tsb_reg |= base;
	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
	tte |= (tsb_paddr & ~(page_sz - 1UL));

	mm->context.tsb_reg_val = tsb_reg;
	mm->context.tsb_map_vaddr = base;
	mm->context.tsb_map_pte = tte;
}

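/* Allocate a single zeroed page as the initial TSB for a new address
 * space and clear its context value.
 */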
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	mm->context.sparc64_ctx_val = 0UL;
	if (unlikely(!page))
		return -ENOMEM;

	mm->context.tsb = (struct tsb *) page;
	setup_tsb_params(mm, PAGE_SIZE);

	return 0;
}

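/* Free the TSB and, if a hardware context number was ever assigned,
 * return it to the allocation bitmap.
 */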
void destroy_context(struct mm_struct *mm)
{
	free_page((unsigned long) mm->context.tsb);

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}