blob: f0d6a9700f4c8351e20be4743d9782c590b9e016 [file] [log] [blame]
Sam Ravnborgf5e706a2008-07-17 21:55:51 -07001#ifndef _SPARC64_TLBFLUSH_H
2#define _SPARC64_TLBFLUSH_H
3
4#include <linux/mm.h>
5#include <asm/mmu_context.h>
6
7/* TSB flush operations. */
Peter Zijlstra90f08e32011-05-24 17:11:50 -07008
9#define TLB_BATCH_NR 192
10
/*
 * A batch of deferred user TLB flushes for a single address space.
 * Addresses are queued here and flushed together (see flush_tsb_user()
 * and flush_tlb_pending() below) rather than one at a time.
 */
struct tlb_batch {
	struct mm_struct *mm;		/* address space the queued flushes target */
	unsigned long tlb_nr;		/* number of entries queued in vaddrs[] */
	unsigned long active;		/* NOTE(review): presumably nonzero while a
					 * batch is being gathered — confirm in
					 * mm/tlb.c, which owns this state */
	unsigned long vaddrs[TLB_BATCH_NR];	/* pending virtual addresses */
};
17
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070018extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
Peter Zijlstra90f08e32011-05-24 17:11:50 -070019extern void flush_tsb_user(struct tlb_batch *tb);
David S. Millerf36391d2013-04-19 17:26:26 -040020extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070021
22/* TLB flush operations. */
23
/*
 * Intentionally a no-op on sparc64: mm-wide invalidation does not happen
 * at this hook. NOTE(review): presumably handled via the deferred
 * batch/lazy-MMU machinery declared in this header (flush_tlb_pending()
 * et al.) and context management — confirm against mm/tlb.c.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070027
/*
 * Intentionally a no-op: single-page user flushes are not issued here.
 * NOTE(review): presumably routed through the tlb_batch / lazy-MMU path
 * instead — confirm against mm/tlb.c.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}
32
/*
 * Intentionally a no-op: range flushes for user mappings are not issued
 * here. NOTE(review): presumably covered by the deferred batch flush
 * path declared above — confirm against mm/tlb.c.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
37
/*
 * sparc64 provides lazy MMU mode: TLB flushes queued between
 * arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are issued
 * in one go via flush_tlb_pending().
 */
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

extern void flush_tlb_pending(void);
extern void arch_enter_lazy_mmu_mode(void);
extern void arch_leave_lazy_mmu_mode(void);
/* No mid-mode flush needed on this architecture. */
#define arch_flush_lazy_mmu_mode() do {} while (0)
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070044
45/* Local cpu only. */
46extern void __flush_tlb_all(void);
David S. Millerf36391d2013-04-19 17:26:26 -040047extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070048extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
49
50#ifndef CONFIG_SMP
51
/*
 * UP build: flush kernel mappings in [start, end) — TSB entries first,
 * then the local TLB.
 */
#define flush_tlb_kernel_range(start,end) \
do {	flush_tsb_kernel_range(start,end); \
	__flush_tlb_kernel_range(start,end); \
} while (0)
56
/*
 * UP build: a "global" single-page flush reduces to a local flush of
 * @vaddr in @mm's hardware context.
 */
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}
61
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070062#else /* CONFIG_SMP */
63
64extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
David S. Millerf36391d2013-04-19 17:26:26 -040065extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070066
/*
 * SMP build: flush the local TSB entries for [start, end), then
 * broadcast the kernel TLB flush to all CPUs.
 */
#define flush_tlb_kernel_range(start, end) \
do {	flush_tsb_kernel_range(start,end); \
	smp_flush_tlb_kernel_range(start, end); \
} while (0)
71
/* SMP build: broadcast the single-page flush to every CPU. */
#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)
74
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070075#endif /* ! CONFIG_SMP */
76
77#endif /* _SPARC64_TLBFLUSH_H */