/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

/* Serialises tlbie on MMUs without MMU_FTR_LOCKLESS_TLBIE */
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#define RIC_FLUSH_TLB 0	/* invalidate TLB entries only */
#define RIC_FLUSH_PWC 1	/* invalidate the page walk cache only */
#define RIC_FLUSH_ALL 2	/* invalidate TLB entries and the page walk cache */

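/*
 * tlbiel: invalidate entries for this PID on the local processor thread
 * only. Each call below targets a single TLB congruence class (set); the
 * caller iterates over all sets.
 */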
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	/* tlbiel, hand-encoded from the raw opcode (0x7c000224) */
	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	for (set = 0; set < POWER9_TLB_SETS_RADIX; set++) {
		__tlbiel_pid(pid, set, ric);
	}
	return;
}

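/*
 * tlbie: broadcast the invalidation (scope selected by 'ric') for this
 * PID to all processors in the system.
 */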
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	/* tlbie, hand-encoded from the raw opcode (0x7c000264) */
	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

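/*
 * tlbiel for a single virtual address: local-only invalidation of the
 * entry mapping 'va' with actual page size encoding 'ap' for this PID.
 */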
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

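/*
 * tlbie for a single virtual address: broadcast invalidation of the entry
 * mapping 'va' with actual page size encoding 'ap' for this PID.
 */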
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
		     "(%2 << 17) | (%3 << 18) | (%4 << 21)"
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);

	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
				  unsigned long ap, int nid)
{
	unsigned long pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
				     mmu_get_ap(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
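/*
 * Return true when the mm has only been used on threads of the current
 * core, in which case a local (tlbiel) flush is sufficient.
 */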
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_core_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_core_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    unsigned long ap, int nid)
{
	unsigned long pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_core_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_ap(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	/* Kernel translations use PID 0, so flush everything under PID 0 */
	_tlbie_pid(0, RIC_FLUSH_ALL);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	struct mm_struct *mm = tlb->mm;

	radix__flush_tlb_mm(mm);
}