/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

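/*
 * A rough map of the tlbie/tlbiel operands used below (a reader's note
 * based on the ISA 3.0 encoding, not normative):
 *
 *   RB  - the IS field (and, for tlbiel, the set/congruence class), or
 *         the effective address for a va-based invalidation
 *   RS  - the PID (upper word) and/or LPID (lower word) to match
 *   RIC - what to invalidate: TLB entries, the page walk cache (PWC),
 *         or both (see RIC_FLUSH_* above)
 *   PRS - process scoped (1) vs partition scoped (0)
 *   R   - radix (1) vs hash (0) page table format
 */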
static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1; /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
        int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_pid(pid, 0, ric);

        if (ric == RIC_FLUSH_ALL)
                /* For the remaining sets, just flush the TLB */
                ric = RIC_FLUSH_TLB;

        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_pid(pid, set, ric);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
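
/*
 * Note: the trailing PPC_INVALIDATE_ERAT above drops stale
 * effective-to-real translations cached in the ERAT, which a tlbiel
 * sequence alone is not guaranteed to clear on all implementations
 * (a cautious reading; the commit history has the precise hardware
 * rationale).
 */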

static inline void tlbiel_pwc(unsigned long pid)
{
        asm volatile("ptesync": : :"memory");

        /* For PWC flush, we don't look at set number */
        __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1; /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
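
/*
 * tlbie vs tlbiel: tlbie broadcasts the invalidation to every processor
 * in the system, so it must be followed by eieio; tlbsync; ptesync to
 * wait for completion everywhere. tlbiel only affects the local CPU's
 * TLB and just needs the surrounding ptesync pair.
 */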

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
                              unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1; /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("ptesync": : :"memory");
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
                             unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1; /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        unsigned long pid;
        struct mm_struct *mm = tlb->mm;

        /*
         * If we are doing a full mm flush, we will do a tlb flush
         * with RIC_FLUSH_ALL later.
         */
        if (tlb->fullmm)
                return;

        preempt_disable();

        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                tlbiel_pwc(pid);

        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                       int psize)
{
        unsigned long pid;
        unsigned long ap = mmu_get_ap(psize);

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        /* need the return fix for nohash.c */
        if (vma && is_vm_hugetlb_page(vma))
                return __local_flush_hugetlb_page(vma, vmaddr);
#endif
        radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
                                          mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
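/*
 * The SMP variants below pick between a local tlbiel and a broadcast
 * tlbie based on mm_is_thread_local(): if the mm has only ever run on
 * the current thread, a local flush suffices. Broadcast tlbie is
 * serialized via native_tlbie_lock on CPUs lacking
 * MMU_FTR_LOCKLESS_TLBIE.
 */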
void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;

        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_pid(pid, RIC_FLUSH_ALL);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        unsigned long pid;
        struct mm_struct *mm = tlb->mm;

        /*
         * If we are doing a full mm flush, we will do a tlb flush
         * with RIC_FLUSH_ALL later.
         */
        if (tlb->fullmm)
                return;

        preempt_disable();

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;

        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_pid(pid, RIC_FLUSH_PWC);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                tlbiel_pwc(pid);
no_context:
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                 int psize)
{
        unsigned long pid;
        unsigned long ap = mmu_get_ap(psize);

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
        preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (vma && is_vm_hugetlb_page(vma))
                return flush_hugetlb_page(vma, vmaddr);
#endif
        radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
                                    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);
        _tlbie_pid(0, RIC_FLUSH_ALL);
        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
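
/*
 * Kernel mappings are attached to PID 0 on radix, so a full flush of
 * PID 0 above covers any kernel range (conservative, but simple).
 */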

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
        int psize;

        if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
                psize = mmu_virtual_psize;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
                psize = MMU_PAGE_2M;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
                psize = MMU_PAGE_1G;
        else
                return -1;
        return psize;
}
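
/*
 * For example, on a kernel with 64K base pages, 0x10000 maps to
 * mmu_virtual_psize, 0x200000 to MMU_PAGE_2M and 0x40000000 to
 * MMU_PAGE_1G; any other size returns -1 and causes a full mm flush
 * in radix__tlb_flush() below.
 */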

void radix__tlb_flush(struct mmu_gather *tlb)
{
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;

        psize = radix_get_mmu_psize(page_size);
        /*
         * If the page size is not something we understand, do a full mm flush.
         */
        if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
                radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
        else
                radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a bcast tlbie. Just a
 * number at this point, copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

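/*
 * Example: with 64K pages and the default ceiling of 33, any range wider
 * than 33 * 64K (just over 2MB) is flushed with a single full-PID flush
 * rather than page by page.
 */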
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        unsigned long pid;
        unsigned long addr;
        int local = mm_is_thread_local(mm);
        unsigned long ap = mmu_get_ap(psize);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
        unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto err_out;

        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * page_size) {
                if (local)
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB);
                goto err_out;
        }
        for (addr = start; addr < end; addr += page_size) {
                if (local)
                        _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
                else {
                        if (lock_tlbie)
                                raw_spin_lock(&native_tlbie_lock);
                        _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
                        if (lock_tlbie)
                                raw_spin_unlock(&native_tlbie_lock);
                }
        }
err_out:
        preempt_enable();
}

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                              unsigned long page_size)
{
        unsigned long rb, rs, prs, r;
        unsigned long ap;
        unsigned long ric = RIC_FLUSH_TLB;

        ap = mmu_get_ap(radix_get_mmu_psize(page_size));
        rb = gpa & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1; /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
        unsigned long rb, rs, prs, r;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1; /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
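
/*
 * PMD-level mappings (transparent huge pages) are 2M in the radix
 * page-table layout, which is why MMU_PAGE_2M is hardcoded above.
 */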

void radix__flush_tlb_all(void)
{
        unsigned long rb, prs, r, rs;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
        prs = 0; /* partition scoped */
        r = 1; /* radix format */
        rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

        asm volatile("ptesync": : :"memory");
        /*
         * First flush guest entries by passing PRS = 1 and LPID != 0.
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
        /*
         * Then flush host entries by passing PRS = 0 and LPID == 0.
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
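
/*
 * With IS = 3 the match is against all entries rather than a particular
 * PID/LPID, so the two tlbie instructions above differ only in scope:
 * PRS = 1 covers process-scoped (guest) translations, PRS = 0 the
 * partition-scoped (host) ones. (Reading based on the IS/PRS encoding.)
 */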

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
                                 unsigned long address)
{
        /*
         * We track the page size in the PTE only on POWER9 DD1, so this
         * must only be called on DD1.
         */
        if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                VM_WARN_ON(1);
                return;
        }

        if (old_pte & R_PAGE_LARGE)
                radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
        else
                radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}