/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
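/*
 * RIC (Radix Invalidation Control) selects what tlbie[l] invalidates:
 * 0 = TLB entries only, 1 = page walk cache only, 2 = everything,
 * including cached copies of process/partition table entries.
 */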
25
Nicholas Piggind4748272017-12-24 01:15:50 +100026/*
27 * tlbiel instruction for radix, set invalidation
28 * i.e., r=1 and is=01 or is=10 or is=11
29 */
30static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
31 unsigned int pid,
32 unsigned int ric, unsigned int prs)
33{
34 unsigned long rb;
35 unsigned long rs;
36 unsigned int r = 1; /* radix format */
37
38 rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
39 rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
40
41 asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
42 : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
43 : "memory");
44}
45
46static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
47{
48 unsigned int set;
49
50 asm volatile("ptesync": : :"memory");
51
52 /*
53 * Flush the first set of the TLB, and the entire Page Walk Cache
54 * and partition table entries. Then flush the remaining sets of the
55 * TLB.
56 */
57 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
58 for (set = 1; set < num_sets; set++)
59 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
60
61 /* Do the same for process scoped entries. */
62 tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
63 for (set = 1; set < num_sets; set++)
64 tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
65
66 asm volatile("ptesync": : :"memory");
67}
68
69void radix__tlbiel_all(unsigned int action)
70{
71 unsigned int is;
72
73 switch (action) {
74 case TLB_INVAL_SCOPE_GLOBAL:
75 is = 3;
76 break;
77 case TLB_INVAL_SCOPE_LPID:
78 is = 2;
79 break;
80 default:
81 BUG();
82 }
83
84 if (early_cpu_has_feature(CPU_FTR_ARCH_300))
85 tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
86 else
87 WARN(1, "%s called on pre-POWER9 CPU\n", __func__);
88
89 asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
90}
91
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
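	/*
	 * Also invalidate the ERAT (effective-to-real address translation
	 * cache), which caches translations separately from the TLB.
	 */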
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_pid(pid, ric);
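	/*
	 * eieio; tlbsync; ptesync orders the tlbie against later accesses
	 * and waits for the invalidation to complete on all CPUs.
	 */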
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
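	/*
	 * The PWC flush is deferred: setting need_flush_all makes the
	 * eventual radix__tlb_flush() use a RIC_FLUSH_ALL variant.
	 */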
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
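	/* Kernel translations are done under PID 0, so flush all of PID 0. */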
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

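/* "end" value that requests a full address space flush rather than a range. */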
#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
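/*
 * With the 128 TLB sets noted above, the local ceiling works out to 256
 * pages, roughly the point where iterating every set once via tlbiel
 * becomes cheaper than flushing page by page.
 */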

void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
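		/*
		 * If the range covers any whole 2M (PMD) regions, round
		 * start up and end down to PMD boundaries so those can be
		 * flushed at the 2M page size as well.
		 */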
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	/*
	 * if page size is not something we understand, do a full mm flush
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		radix__flush_all_mm(mm);
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		unsigned long start = tlb->start;
		unsigned long end = tlb->end;

		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
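/*
 * Flush the base-page-size translations covering a range that has just
 * been collapsed into a single PMD (2M) mapping, along with the PWC.
 */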
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * Now flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Now flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track page size in pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought in obsolete translations into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */