#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

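/* Sentinel context id: no hardware MMU context is associated. */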
#define MMU_NO_CONTEXT	~0UL


#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

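/*
 * Each helper below dispatches at runtime between the two MMU back
 * ends: radix_enabled() is true when the radix page-table format is
 * in use, otherwise the hash implementations are called.
 */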
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

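/*
 * The local_* variants invalidate the current CPU's TLB only; the
 * corresponding flush_* versions below also take care of other CPUs.
 */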
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

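/*
 * The "nohash" variant is used where the Linux PTE changed in a way
 * that does not also require the underlying hash PTE to be flushed;
 * radix has no such distinction and falls back to the plain page flush.
 */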
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

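/*
 * Called when an mmu_gather batch is flushed at the end of an unmap
 * operation.
 */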
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

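/*
 * With CONFIG_SMP the mm- and page-level flushes may need to reach
 * other CPUs; on UP builds they collapse to the local variants.
 */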
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * Flush the page walk cache for the given address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. The
	 * upper-level page table entry has already been marked "none" by
	 * this point, so it is safe to flush the PWC here. Hash MMUs have
	 * no page walk cache, so there is nothing to do in that case.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */