/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

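/*
 * RIC (Radix Invalidation Control) selects what the tlbie/tlbiel below
 * invalidate: RIC=0 flushes only TLB entries, RIC=1 flushes only the
 * Page Walk Cache, and RIC=2 flushes the TLB, the PWC and any cached
 * copy of the target's process/partition table entry.
 */
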
/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;
	unsigned int r = 1; /* radix format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
}
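
/*
 * Encoding note for the helper above: PPC_BITLSHIFT(n) == 63 - n (IBM
 * bit numbering), so the set number lands in rb at shift 12, "is" at
 * shift 10, and the PID occupies the upper 32 bits of rs via the shift
 * by PPC_BITLSHIFT(31) == 32.
 */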

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

	/* Do the same for process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

	asm volatile("ptesync": : :"memory");
}

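/*
 * The "is" (invalidation scope) values used below: is = 2 invalidates
 * entries matching the current LPID, is = 3 invalidates entries
 * regardless of LPID, hence its use for the global scope.
 */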
void radix__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
	else
		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

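/*
 * Naming convention for the helpers below: __tlbiel_* emit the local
 * form (tlbiel), which only affects the TLB of the executing thread,
 * while __tlbie_* emit the global form (tlbie), which is broadcast to
 * all processors and must be completed with eieio/tlbsync/ptesync.
 */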
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

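/*
 * Some POWER9 revisions (CPU_FTR_P9_TLBIE_BUG) can lose a tlbie; the
 * workaround below issues one extra, harmless 64K-page tlbie for PID 0
 * at the top of the 52-bit address space after the real invalidations.
 */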
static inline void fixup_tlbie(void)
{
	unsigned long pid = 0;
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
	}
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
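
/*
 * A note on the closing sequence above: eieio orders the preceding
 * tlbie(s) against later operations, tlbsync waits for the
 * invalidations to complete on all other processors, and the final
 * ptesync makes the whole sequence globally visible before we return.
 */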

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
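/*
 * On radix kernels the generic entry points above are wired to the
 * radix__* implementations below; the local_* variants use tlbiel and
 * so only affect the calling CPU, the others may broadcast tlbie.
 */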
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* !CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
	/*
	 * P9 nest MMU has issues with the page walk cache
	 * caching PTEs and not flushing them properly when
	 * RIC = 0 for a PID/LPID invalidate
	 */
	return atomic_read(&mm->context.copros) != 0;
}
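
/*
 * "Escalation" means upgrading a TLB-only (RIC=0) PID flush to a flush
 * of everything (RIC=2) so that the nest MMU's page walk cache is also
 * invalidated; see the tlbie callers below.
 */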

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm)) {
		if (mm_needs_flush_escalation(mm))
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

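/*
 * Kernel translations are done under PID 0, so flushing a kernel range
 * simply invalidates everything for PID 0 on all CPUs.
 */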
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
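
/*
 * With POWER9_TLB_SETS_RADIX == 128 the local ceiling works out to 256
 * pages: beyond that, one full-PID tlbiel loop over the sets is cheaper
 * than per-page invalidations.
 */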

void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		}
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			fixup_tlbie();
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}
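
/*
 * For example, radix_get_mmu_psize(1UL << 21) returns MMU_PAGE_2M; any
 * size other than the base, 2M or 1G page size returns -1, in which
 * case callers fall back to a full mm flush.
 */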

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	/*
	 * if page size is not something we understand, do a full mm flush
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		radix__flush_all_mm(mm);
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		unsigned long start = tlb->start;
		unsigned long end = tlb->end;

		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (!local && mm_needs_flush_escalation(mm))
			also_pwc = true;

		if (local)
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track page size in pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */