#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}
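
/*
 * Note: the .insn above emits IDTE (invalidate DAT table entry,
 * opcode 0xb98e). As I read the encoding, the operand holding 2048
 * (bit 52 of the register) selects the clearing-by-ASCE operation,
 * the second operand supplies the ASCE to clear for, and the final
 * mask field picks the scope: 0 clears the TLBs of all CPUs, 1 (in
 * the variant below) only the local TLB, which requires the
 * local-TLB-clearing facility (MACHINE_HAS_TLB_LC).
 */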

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

#ifndef CONFIG_64BIT
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* CONFIG_64BIT */

	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc");
}
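
/*
 * Note: CSP is compare-and-swap-and-purge. Registers 2 and 3 form
 * the even-odd pair holding the comparison and replacement values
 * (both zero, so the swap merely rewrites the zeroed dummy word);
 * the point of the instruction here is the purge, which, if the
 * architecture definition is read correctly, is requested by the
 * low-order bit set in the second-operand address in reg4 and
 * clears the TLBs of all CPUs in the configuration.
 */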

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is
 * used this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
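
/*
 * Note on the 0x10000 increments: as used in this file, the lower
 * 16 bits of attach_count track how many CPUs have the mm attached,
 * while bumping the upper half signals a flush in progress. The
 * attach/detach side of this bookkeeping lives in asm/mmu_context.h.
 */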

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
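
/*
 * The local fast path above fires only when no CPU besides (at most)
 * the current one has the mm attached and the flush mask contains
 * only this CPU; everything else falls back to a machine-wide IDTE
 * or, without IDTE, a full global flush.
 */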

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) init_mm.pgd |
				 init_mm.context.asce_bits);
	else
		__tlb_flush_global();
}
#else
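/*
 * !CONFIG_SMP: with a single CPU every flush can stay local.
 */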
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
				       init_mm.context.asce_bits);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu. If the gmap list is not empty
	 * the mm is attached to additional guest ASCEs as well, so
	 * fall back to a full flush.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
}
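
/*
 * The ASCE handed to __tlb_flush_asce is the mm's page-table origin
 * OR-ed with mm->context.asce_bits; the same composition is used for
 * the kernel ASCE in __tlb_flush_kernel above.
 */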

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)
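
/*
 * These three can stay empty: per the comment above the pte-level
 * primitives take care of the flushing themselves, and single-page
 * flushes are presumably covered by the IPTE-based ptep helpers in
 * asm/pgtable.h rather than by flush_tlb_page().
 */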

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */