/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

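/* One flush batch per CPU: the mm being operated on plus a short
 * array of virtual addresses still awaiting a TLB flush.
 */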
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

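/* Drain this CPU's pending batch: a single queued address uses the
 * per-page flush, larger batches take the pending-flush path (a
 * cross-call on SMP).  Nothing is issued for an mm whose hardware
 * context is no longer valid.
 */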
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

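/* Lazy MMU mode brackets a run of page table updates: entering marks
 * this CPU's batch active so flushes are queued, leaving drains
 * whatever is still pending.
 */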
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

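/* Queue one page-aligned virtual address for a deferred flush; the low
 * bit of the address is borrowed to record whether the mapping was
 * executable.  Outside lazy MMU mode the flush is done immediately.
 */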
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	/* Not in lazy MMU mode, flush synchronously.  We must not
	 * return here: put_cpu_var() still has to run to re-enable
	 * preemption.
	 */
	if (!tb->active) {
		global_flush_tlb_page(mm, vaddr);
		flush_tsb_user_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

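/* Called as each user PTE is torn down.  If a dirty page's kernel
 * linear address and the user address differ in bit 13, the virtually
 * indexed D-cache may hold an alias, so flush it first; the TLB entry
 * itself is only queued when the whole mm is not being torn down.
 */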
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
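/* The PMD being replaced still points at a table of 8K PTEs: walk the
 * huge page range and queue a flush for every valid entry.
 */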
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

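/* Install a new PMD, keeping the per-mm count of huge mappings in
 * sync and arranging a flush for whatever the old PMD mapped.
 */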
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}

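/* Stash a preallocated page table for a huge PMD, reusing the page
 * table's own memory as the list_head that links it into the per-mm
 * list.  Caller holds mm->page_table_lock.
 */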
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

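/* Hand back a previously deposited page table, clearing the two words
 * that served as list linkage while it was parked.
 */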
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */