#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

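/*
 * Sentinel context id: ~0UL never matches a real allocated context, so
 * the flush code can use it to mean "no context".
 */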
#define MMU_NO_CONTEXT	~0UL

#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

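/*
 * Each operation below dispatches on the active MMU type at runtime:
 * radix_enabled() selects the radix__* implementation, otherwise the
 * hash__* implementation is used.
 */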
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

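/* Flush translations for a range of kernel virtual addresses. */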
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

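/* Flush all user translations for @mm, on the local CPU only. */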
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

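/* Flush the translation for a single page, on the local CPU only. */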
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

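/*
 * Flush one page from the TLB without updating the hash page table;
 * radix has no hash table, so it falls back to a plain page flush.
 */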
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page_nohash(vma, vmaddr);
}

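/* Final flush for an mmu_gather batch, called when the batch is torn down. */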
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}

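/*
 * Global (all-CPU) flush variants. On UP builds they are simply
 * aliases for the local flushes above.
 */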
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#endif /* CONFIG_SMP */
/*
 * Flush the page walk cache for the address.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * Flush the page table walk cache on freeing a page table. The
	 * upper level page table entry has already been marked none by
	 * this point, so it is safe to flush the PWC here.
	 */
	if (!radix_enabled())
		return;

	radix__flush_tlb_pwc(tlb, address);
}
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */