#ifndef _ALPHA_TLBFLUSH_H
#define _ALPHA_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/compiler.h>

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
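
/* The __EXTERN_INLINE dance lets every includer inline these helpers
   while still producing exactly one out-of-line copy: ordinary
   inclusion sees "extern inline" (no code emitted), while the single
   .c file that defines __EXTERN_INLINE (empty) before including us
   emits the real definitions.  __MMU_EXTERN_INLINE remembers which
   case we are in for the #undefs below.  */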

extern void __load_new_mm_context(struct mm_struct *);


/* Use a few helper functions to hide the ugly broken ASN
   numbers on early Alphas (ev4 and ev45).  */

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
        tbiap();
}

__EXTERN_INLINE void
ev5_flush_tlb_current(struct mm_struct *mm)
{
        __load_new_mm_context(mm);
}
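
/* On ev5 and up, loading a new mm context allocates a fresh ASN, so
   the old entries simply stop matching and nothing needs an explicit
   invalidate.  ev4 cannot trust its ASNs, hence the tbiap() above to
   wipe all process-visible TLB entries.  */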

/* Flush just one page in the current TLB set.  We need to be very
   careful about the icache here: there is no way to invalidate a
   specific icache page.  */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
                           struct vm_area_struct *vma,
                           unsigned long addr)
{
        int tbi_flag = 2;
        if (vma->vm_flags & VM_EXEC) {
                __load_new_mm_context(mm);
                tbi_flag = 3;
        }
        tbi(tbi_flag, addr);
}
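
/* Following the tbi PALcode wrappers in <asm/system.h>, argument 2 is
   TBISD (invalidate a single data-stream translation) and 3 is TBIS
   (invalidate both the I- and D-stream translations), which is why
   the VM_EXEC case switches to 3 after reloading the context.  */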

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
                           struct vm_area_struct *vma,
                           unsigned long addr)
{
        if (vma->vm_flags & VM_EXEC)
                __load_new_mm_context(mm);
        else
                tbi(2, addr);
}
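
/* For executable mappings, ev5 reloads the whole context instead: the
   fresh ASN retires every entry this mm owned, and it is the only
   safe way to cover the icache, given that a single icache page
   cannot be invalidated.  */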


#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current              alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page         alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current             ev4_flush_tlb_current
#  define flush_tlb_current_page        ev4_flush_tlb_current_page
# else
#  define flush_tlb_current             ev5_flush_tlb_current
#  define flush_tlb_current_page        ev5_flush_tlb_current_page
# endif
#endif
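
/* Generic kernels support several CPU families in one image, so they
   dispatch at run time through the machine vector (alpha_mv); kernels
   built for one CPU bind the ev4/ev5 variant at compile time, letting
   the call be inlined.  */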

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/* Flush current user mapping.  */
static inline void
flush_tlb(void)
{
        flush_tlb_current(current->active_mm);
}

/* Flush someone else's user mapping.  */
static inline void
flush_tlb_other(struct mm_struct *mm)
{
        unsigned long *mmc = &mm->context[smp_processor_id()];
        /* Check that it is not already zero, to avoid cacheline
           ping-pong when possible.  */
        if (*mmc) *mmc = 0;
}
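
/* Zeroing mm->context[cpu] marks that context stale: the next time
   the mm is activated on that CPU, __load_new_mm_context() must
   allocate a fresh ASN, which implicitly drops every TLB entry the
   mm owned there.  */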

/* Flush a specified range of user mapping page tables from the TLB.
   Although Alpha uses VPTE caches, this can be a nop: Alpha has no
   fine-grained TLB flushing, so the stale VPTE entries get wiped out
   along with everything else during the next flush_tlb_range.  */

static inline void
flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
                   unsigned long end)
{
}

#ifndef CONFIG_SMP
/* Flush everything (kernel mapping may also have changed
   due to vmalloc/vfree).  */
static inline void flush_tlb_all(void)
{
        tbia();
}

/* Flush a specified user mapping.  */
static inline void
flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

/* Page-granular tlb flush.  */
static inline void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm == current->active_mm)
                flush_tlb_current_page(mm, vma, addr);
        else
                flush_tlb_other(mm);
}
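
/* A minimal usage sketch (illustrative only, names are hypothetical):
   after replacing a PTE, a caller would retire the old translation
   with something like

        set_pte(ptep, mk_pte(page, vma->vm_page_prot));
        flush_tlb_page(vma, addr);

   If the mm is not live on this CPU, only its per-CPU context word is
   zeroed and the real flush is deferred to the next activation.  */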

/* Flush a specified range of a user mapping.  On the Alpha we flush
   the whole user tlb.  */
static inline void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                unsigned long end)
{
        flush_tlb_mm(vma->vm_mm);
}
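
/* A whole-mm flush is just an ASN invalidate (or a tbiap on ev4), so
   a ranged flush degenerates to flush_tlb_mm() rather than paying a
   per-page cost.  */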

#else /* CONFIG_SMP */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
                            unsigned long);

#endif /* CONFIG_SMP */

#define flush_tlb_kernel_range(start, end) flush_tlb_all()
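
/* Kernel translations are global (they match in every address space),
   so there is no per-context trick to retire them; any change to the
   kernel map falls back to a full TLB invalidate.  */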

#endif /* _ALPHA_TLBFLUSH_H */