#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */
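/*
 * The TSB (Translation Storage Buffer) is a software-managed, in-memory
 * cache of translations that the TLB miss handlers consult before
 * walking the page tables, so stale entries must be purged from it as
 * well as from the hardware TLB.
 */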

#define TLB_BATCH_NR	192

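/*
 * A per-cpu batch of pending user TLB flushes: @mm is the address space
 * being flushed and @tlb_nr the number of virtual addresses queued in
 * @vaddrs.  @active is nonzero while lazy MMU mode has the batch open;
 * flushes requested outside an active batch are performed immediately.
 */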
struct tlb_batch {
	struct mm_struct *mm;
	unsigned long tlb_nr;
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];
};

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/* TLB flush operations. */

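/*
 * These are intentionally empty: user TLB flushes are accumulated in the
 * per-cpu tlb_batch and issued when the batch is drained, see
 * flush_tlb_pending() below.
 */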
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

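/*
 * Generic mm code brackets batched page table updates with
 * arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode(); leaving the
 * mode (or an explicit flush_tlb_pending()) drains the per-cpu
 * tlb_batch.  An illustrative caller, with a hypothetical pte update:
 *
 *	arch_enter_lazy_mmu_mode();
 *	set_pte_at(mm, addr, ptep, pte);	(queued in tlb_batch)
 *	arch_leave_lazy_mmu_mode();		(drains pending flushes)
 */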
void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()	do {} while (0)

/* Local cpu only. */
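/*
 * These operate only on the calling cpu's TLB: __flush_tlb_page()
 * removes the mapping of @vaddr in hardware context @context.
 */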
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

#define flush_tlb_kernel_range(start,end) \
do {	flush_tsb_kernel_range(start,end); \
	__flush_tlb_kernel_range(start,end); \
} while (0)

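/*
 * On UP a "global" page flush is just a local one; CTX_HWBITS()
 * extracts the hardware context number from mm->context.
 */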
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

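/*
 * On SMP the flushes are broadcast to the other cpus via cross calls;
 * see arch/sparc/kernel/smp_64.c for the implementations.
 */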
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define flush_tlb_kernel_range(start, end) \
do {	flush_tsb_kernel_range(start,end); \
	smp_flush_tlb_kernel_range(start, end); \
} while (0)

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */