/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

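/*
 * Drain this CPU's TLB batch: scrub the TSB entries for the queued
 * addresses first, then shoot the addresses down in the TLB (via
 * cross-call on SMP), and finally reset the batch.
 */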
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

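/*
 * Lazy MMU mode brackets a run of page table updates.  While it is
 * active, tlb_batch_add_one() queues addresses in the per-cpu batch
 * rather than flushing them one at a time; leaving the mode drains
 * anything still pending.
 */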
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

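/*
 * Queue one virtual address for a deferred TLB flush.  The address is
 * page aligned, which leaves bit 0 free to carry the "executable"
 * attribute.  The batch is flushed early whenever it would mix mms or
 * page sizes, and bypassed entirely outside of lazy MMU mode.
 */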
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, bool huge)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, huge);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->huge = huge;
	}

	if (tb->huge != huge) {
		flush_tlb_pending();
		tb->huge = huge;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

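/*
 * Entry point from the pte zapping paths.  Before queueing the TLB
 * flush, write back any D-cache lines that may alias a dirty
 * file-backed page.
 */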
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	bool huge = is_hugetlb_pte(orig);

	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

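		/*
		 * The D-cache is virtually indexed, so the kernel and
		 * user mappings of this page only share cache lines
		 * when they have the same color.  Bit 13 is the first
		 * bit above the 8K page offset; if the colors differ,
		 * write the dirty lines back before the translation
		 * goes away.
		 */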
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), huge);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
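/*
 * The pmd previously pointed to a pte page: walk all HPAGE_SIZE worth
 * of 8K entries beneath it and queue a flush for each valid one.
 */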
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, false);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

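/*
 * Install a new pmd, keeping the per-mm counters of THP and huge
 * zero page mappings in sync and queueing TLB flushes for whatever
 * mapping was previously present.
 */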
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
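		/*
		 * An 8MB hugepage is backed by two 4MB hardware TLB
		 * entries spaced REAL_HPAGE_SIZE apart, so both halves
		 * have to be flushed.
		 */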
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, true);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  true);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

/*
 * This routine is only called when splitting a THP.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t entry = *pmdp;

	pmd_val(entry) &= ~_PAGE_VALID;

	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;
}

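/*
 * The deposit/withdraw pair stashes the pte page displaced by a huge
 * pmd so that it can be reinstated when the THP is split.  Deposited
 * pages are kept on a FIFO list threaded through the pgtable pages
 * themselves, rooted at the pmd.
 */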
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */