/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

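/*
 * Values for the RIC (Radix Invalidation Control) field of the
 * tlbie(l) instructions below: flush TLB entries only, the page
 * walk cache (PWC) only, or both.
 */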
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

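/*
 * Flush one TLB set on the local CPU for the given PID. IS = 1 in
 * RB selects PID-scoped invalidation and the set number picks the
 * congruence class; @ric chooses TLB entries, the page walk cache,
 * or both.
 */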
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	for (set = 0; set < POWER9_TLB_SETS_RADIX; set++) {
		__tlbiel_pid(pid, set, ric);
	}
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

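/*
 * Global version of _tlbiel_pid(): tlbie is broadcast to all
 * processors, and the eieio; tlbsync; ptesync sequence waits for
 * the invalidation to complete everywhere.
 */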
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

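/*
 * Flush a single virtual address on the local CPU. RB carries the
 * effective address (IS = 0) and @ap encodes the actual page size.
 */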
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

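/*
 * Global version of _tlbiel_va(): invalidate the translation for
 * @va under @pid on all processors.
 */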
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

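/*
 * Flush the page walk cache (cached intermediate levels of the page
 * table) for this address space on the local CPU.
 */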
void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/*
	 * If we are doing a full mm flush, we will do a tlb flush
	 * with RIC_FLUSH_ALL later.
	 */
	if (tlb->fullmm)
		return;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);

	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

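/* Flush one page of the given page size from the local TLB. */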
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
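/*
 * Global flush of an address space: use broadcast tlbie if the mm
 * has been active on other CPUs, otherwise the cheaper local tlbiel
 * is enough.
 */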
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/*
	 * If we are doing a full mm flush, we will do a tlb flush
	 * with RIC_FLUSH_ALL later.
	 */
	if (tlb->fullmm)
		return;
	preempt_disable();

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */

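/*
 * Kernel translations are tagged with PID 0, so a RIC_FLUSH_ALL of
 * PID 0 covers any kernel range.
 */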
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	_tlbie_pid(0, RIC_FLUSH_ALL);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

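/*
 * Map a page size in bytes to the MMU page size index, or -1 if it
 * is not one of the sizes radix handles (base page, 2M, 1G).
 */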
static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

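/*
 * Tail of an mmu_gather operation: flush by range and page size
 * when both are known, otherwise fall back to a full mm flush.
 */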
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a bcast tlbie. Just a
 * number at this point copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

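/*
 * Range flush with a known page size: beyond the ceiling it is
 * cheaper to flush the whole PID than to tlbie page by page.
 */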
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else {
			if (lock_tlbie)
				raw_spin_lock(&native_tlbie_lock);
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
			if (lock_tlbie)
				raw_spin_unlock(&native_tlbie_lock);
		}
	}
err_out:
	preempt_enable();
}

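/*
 * Partition-scoped flush of a single guest physical address:
 * PRS = 0 and RS carries the LPID rather than a PID.
 */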
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

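/*
 * Flush all partition-scoped entries for the given LPID (IS = 2
 * selects LPID-scoped invalidation).
 */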
void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

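/*
 * Flush the whole TLB: IS = 3 invalidates all entries, and the two
 * tlbie instructions below cover guest (PRS = 1) and host (PRS = 0)
 * translations.
 */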
void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * Now flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Now flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the PTE only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}