/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

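/* Flush out the current CPU's batch of pending TLB entries.  The TSB
 * entries are torn down first, then the hardware TLB entries are
 * invalidated, provided the mm still owns a valid hardware context.
 */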
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}

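/* Queue a single virtual address for a deferred TLB flush.  Bit 0 of
 * the queued address records whether the mapping was executable.  A
 * batch only ever covers one mm; switching to a different mm or
 * filling all TLB_BATCH_NR slots forces an immediate
 * flush_tlb_pending().
 */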
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}

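/* A valid user mapping for "vaddr" is going away or being changed.
 * For a dirty page backed by a real file mapping we may first have
 * to flush D-cache aliases; afterwards the address is queued for a
 * TLB flush, unless the entire mm is being torn down, in which case
 * the full-mm flush handles it.
 */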
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

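		/* The user mapping and the kernel mapping of this page
		 * differ in bit 13, i.e. they land in different D-cache
		 * colors, so flush the page to avoid illegal aliases.
		 */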
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
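/* The non-huge PMD "pmd" covering a huge page sized region is being
 * replaced: walk its PTEs and queue a TLB flush for every valid
 * entry.
 */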
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

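/* Install a new PMD value, keeping the per-mm count of huge mappings
 * up to date and flushing whatever the old PMD mapped: a single huge
 * TLB entry, or the individual PTEs of a regular page table.
 */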
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}

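/* Stash a preallocated page table for a transparent huge page, so it
 * can be reinstalled later if the huge mapping has to be split.  The
 * tables are linked together through a list_head stored in the page
 * table page itself.
 */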
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

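/* Retrieve a previously deposited page table, unlinking it from the
 * per-mm list and wiping the two PTE slots that held the list_head
 * before handing it back.
 */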
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */