blob: a8e192e907003dd855f9bb232dede7ae5eb069f3 [file] [log] [blame]
Sam Ravnborgf5e706a2008-07-17 21:55:51 -07001#ifndef _SPARC64_TLBFLUSH_H
2#define _SPARC64_TLBFLUSH_H
3
Sam Ravnborgf5e706a2008-07-17 21:55:51 -07004#include <asm/mmu_context.h>
5
6/* TSB flush operations. */
Peter Zijlstra90f08e32011-05-24 17:11:50 -07007
8#define TLB_BATCH_NR 192
9
/*
 * A batch of pending user-space TLB flushes.  Virtual addresses are
 * queued in vaddrs[] (up to TLB_BATCH_NR of them) and later flushed
 * together via flush_tlb_pending()/flush_tsb_user().
 */
struct tlb_batch {
	bool huge;			/* queued entries map huge pages */
	struct mm_struct *mm;		/* address space the vaddrs belong to */
	unsigned long tlb_nr;		/* number of addresses queued in vaddrs[] */
	unsigned long active;		/* NOTE(review): presumably non-zero while a
					 * batch is being gathered/flushed (lazy MMU
					 * mode) — confirm against mm/tlb.c */
	unsigned long vaddrs[TLB_BATCH_NR];	/* queued virtual addresses */
};
17
Sam Ravnborgf05a6862014-05-16 23:25:50 +020018void flush_tsb_kernel_range(unsigned long start, unsigned long end);
19void flush_tsb_user(struct tlb_batch *tb);
Nitin Gupta24e49ee2016-03-30 11:17:13 -070020void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr, bool huge);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070021
22/* TLB flush operations. */
23
/*
 * Intentionally a no-op on sparc64.
 *
 * NOTE(review): user TLB invalidation appears to be deferred and done
 * in batches through the tlb_batch/flush_tlb_pending() machinery
 * declared in this header, so nothing needs to happen here — confirm
 * against the sparc64 mm code.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070027
/*
 * Intentionally a no-op on sparc64: per-page user TLB invalidation is
 * presumably handled by the deferred tlb_batch path rather than at the
 * generic flush_tlb_page() call site — confirm against sparc64 mm code.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}
32
/*
 * Intentionally a no-op on sparc64: range invalidation for user
 * mappings is presumably covered by the deferred tlb_batch flushing
 * declared above rather than done eagerly here — confirm against the
 * sparc64 mm code.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}
37
David S. Miller4ca9a232014-08-04 20:07:37 -070038void flush_tlb_kernel_range(unsigned long start, unsigned long end);
39
David S. Millerf36391d2013-04-19 17:26:26 -040040#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
41
Sam Ravnborgf05a6862014-05-16 23:25:50 +020042void flush_tlb_pending(void);
43void arch_enter_lazy_mmu_mode(void);
44void arch_leave_lazy_mmu_mode(void);
David S. Millerf36391d2013-04-19 17:26:26 -040045#define arch_flush_lazy_mmu_mode() do {} while (0)
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070046
47/* Local cpu only. */
Sam Ravnborgf05a6862014-05-16 23:25:50 +020048void __flush_tlb_all(void);
49void __flush_tlb_page(unsigned long context, unsigned long vaddr);
50void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070051
52#ifndef CONFIG_SMP
53
/*
 * Uniprocessor build: there are no other cpus to notify, so flush the
 * single address directly from the local TLB.
 *
 * NOTE(review): CTX_HWBITS() comes from asm/mmu_context.h (included
 * above) and presumably extracts the hardware context number from
 * mm->context — confirm there.
 */
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}
58
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070059#else /* CONFIG_SMP */
60
Sam Ravnborgf05a6862014-05-16 23:25:50 +020061void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
62void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070063
David S. Millerf36391d2013-04-19 17:26:26 -040064#define global_flush_tlb_page(mm, vaddr) \
65 smp_flush_tlb_page(mm, vaddr)
66
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070067#endif /* ! CONFIG_SMP */
68
69#endif /* _SPARC64_TLBFLUSH_H */