/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

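/*
 * The RIC (Radix Invalidation Control) field of tlbie/tlbiel selects what
 * gets invalidated: TLB entries only (0), the Page Walk Cache only (1),
 * or everything including cached process/partition table entries (2).
 */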
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;
	unsigned int r = 1; /* radix format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
}

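/*
 * tlbiel invalidates only one congruence class (set) per execution, so
 * flushing the whole local TLB means walking every set, once for the
 * partition-scoped translations and once for the process-scoped ones.
 */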
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

	/* Do the same for process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

	asm volatile("ptesync": : :"memory");
}

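/*
 * IS = 2 limits the invalidation to entries matching the current LPID;
 * IS = 3 invalidates everything. The trailing PPC_INVALIDATE_ERAT also
 * clears the ERAT, which caches translations independently of the TLB.
 */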
void radix__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
	else
		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

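/*
 * tlbiel invalidates only the translations cached by the executing core,
 * one congruence class (set) per execution. Callers use it when
 * mm_is_thread_local() says no other CPU can hold stale entries.
 */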
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

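/*
 * tlbie is broadcast over the fabric and invalidates the matching
 * translations on every processor, so a single execution covers the
 * whole system and no per-set loop is needed for a PID flush.
 */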
static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

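/*
 * A global flush must be ordered: the ptesync before the tlbie makes
 * prior PTE updates visible to the walker, and the eieio;tlbsync;ptesync
 * sequence afterwards waits until the invalidation has completed on all
 * processors.
 */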
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Workaround the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

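/*
 * Single-address invalidation: the effective page number occupies the
 * upper bits of RB (bits 52-63 are masked off for the AP/IS fields), and
 * the actual page size is encoded via the AP value from mmu_get_ap().
 */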
static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

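/*
 * The global (broadcast) counterparts of the tlbiel va helpers above:
 * same RB/RS encoding, but issued as tlbie and completed with
 * eieio;tlbsync;ptesync in the _tlbie_* wrappers.
 */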
static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* !CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
	/*
	 * The P9 nest MMU has issues with the page walk cache caching
	 * PTEs and not flushing them properly when RIC = 0 for a
	 * PID/LPID invalidate, so escalate to a full flush whenever a
	 * coprocessor is attached to this mm.
	 */
	return atomic_read(&mm->context.copros) != 0;
}

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm)) {
		if (mm_needs_flush_escalation(mm))
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

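/*
 * Kernel translations are tagged with PID 0 under radix, so a global
 * RIC_FLUSH_ALL for PID 0 also removes kernel mappings from the TLB
 * and the page walk cache.
 */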
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

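/*
 * Range flush strategy: below the ceiling, invalidate page by page
 * (flushing any 2M THP mappings in the range separately with the 2M AP
 * encoding); above it, throw away the whole PID in one operation.
 */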
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		}
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	/*
	 * If the page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		radix__flush_all_mm(mm);
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		unsigned long start = tlb->start;
		unsigned long end = tlb->end;

		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

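/*
 * Common worker for the fixed-psize range flushes: it picks local vs
 * global and full-PID vs per-page invalidation exactly like
 * radix__flush_tlb_range, optionally flushing the page walk cache too.
 */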
static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (!local && mm_needs_flush_escalation(mm))
			also_pwc = true;

		if (local)
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

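/*
 * A collapsed PMD replaces a page table full of small-page PTEs, so both
 * the stale small-page translations and the page walk cache entries that
 * reference the old table must be invalidated (hence also_pwc = true).
 */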
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

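/*
 * Partition-scoped invalidations by LPID (prs = 0 selects partition-
 * scoped entries), used by the hypervisor to flush guest translations
 * by guest physical address or for the whole LPID.
 */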
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb,rs,prs,r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb,rs,prs,r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

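/*
 * Blow away everything: IS = 3 matches all entries regardless of PID or
 * LPID. Both guest (PRS = 1, LPID != 0) and host (PRS = 0, LPID = 0)
 * translations are flushed, which is what the two tlbie instructions
 * below do.
 */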
void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * Flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track page size in the pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */