/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
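
/*
 * Values for the RIC field of the tlbie/tlbiel instructions below:
 * invalidate TLB entries only, the page-walk cache (PWC) only, or both.
 */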
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	if (ric == RIC_FLUSH_ALL)
		/* For the remaining sets, just flush the TLB */
		ric = RIC_FLUSH_TLB;

	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, ric);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
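
/*
 * The Page Walk Cache (PWC) caches intermediate levels of the radix page
 * tables and is invalidated separately from the TLB proper, e.g. when
 * page-table pages are freed.
 */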
static inline void tlbiel_pwc(unsigned long pid)
{
	asm volatile("ptesync": : :"memory");

	/* For PWC flush, we don't look at set number */
	__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
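
/*
 * Unlike tlbiel above, which only invalidates translations cached by the
 * executing thread, tlbie is broadcast to the whole system; the trailing
 * eieio; tlbsync; ptesync sequence waits for the broadcast invalidation
 * to complete.
 */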
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
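/*
 * When the kernel runs with the radix MMU, the generic flush_tlb_*
 * hooks dispatch to these radix__ variants (via the book3s64 tlbflush
 * wrappers).
 */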
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/*
	 * If we are doing a full mm flush, we will do a tlb flush
	 * with RIC_FLUSH_ALL later.
	 */
	if (tlb->fullmm)
		return;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		tlbiel_pwc(pid);

	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
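/*
 * The SMP variants below use mm_is_thread_local() to choose between a
 * local tlbiel and a broadcast tlbie, depending on whether the mm is
 * known to be live only on the current thread.
 */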
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/*
	 * If we are doing a full mm flush, we will do a tlb flush
	 * with RIC_FLUSH_ALL later.
	 */
	if (tlb->fullmm)
		return;

	preempt_disable();

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_PWC);
	else
		tlbiel_pwc(pid);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */
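
/*
 * Kernel translations are matched under PID 0, so a kernel range flush
 * simply flushes everything under PID 0.
 */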
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}
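
/*
 * radix__tlb_flush() is called when an mmu_gather is torn down: use a
 * ranged flush if the gather tracked a single page size we recognise,
 * otherwise fall back to a full mm flush.
 */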
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a broadcast tlbie. Just a
 * number at this point, copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
err_out:
	preempt_enable();
}
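
/*
 * The _lpid variants below issue partition-scoped invalidations
 * (prs = 0); they are used, for example, by the KVM hypervisor code to
 * flush guest translations for a given LPID.
 */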
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * Flush the guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * Flush the host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the PTE only for POWER9 DD1, so this
	 * must only be called on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}