/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

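/* Per-cpu batch of virtual addresses still waiting for a real TLB flush;
 * tlb_batch_add() fills it and flush_tlb_pending() drains it.
 */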
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };

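/* Drain this CPU's batch: if any addresses are queued and the mm still
 * has a valid hardware context, flush them all with one call into the
 * low-level TLB flush code, then reset the batch.
 */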
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}

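/* Queue one virtual address whose translation is going away.  Dirty
 * pages that may alias in the D-cache are flushed from the cache first,
 * then the address is added to the per-cpu batch; the batch is drained
 * when it fills up or when a different mm comes along.
 */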
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
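	/* Bit 0 of a batched address flags an executable mapping, so the
	 * low-level flush code knows the I-TLB may need flushing too.
	 */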
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
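		/* If paddr and vaddr disagree in bit 13, the lowest bit
		 * above the 8K page offset, the kernel and user mappings
		 * land in different colors of the virtually indexed
		 * D-cache, so the dirty data must be flushed out.
		 */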
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}

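/* Queue flushes for the linear ("VPTE") page table mappings that cover
 * the virtual range [start, end), batching them the same way
 * tlb_batch_add() does.
 */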
void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->fullmm)
		return;

	/* If start is greater than end, that is a real problem. */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

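	/* Spitfire and Cheetah place the linear page table area at
	 * different virtual base addresses.
	 */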
	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}