/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

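/*
 * RIC (Radix Invalidation Control) selects what tlbie/tlbiel invalidate:
 * TLB entries only, the page walk cache (PWC) only, or everything cached
 * for the targeted PID/LPID.
 *
 * The *_tlbiel_* helpers below use tlbiel, which only affects the CPU
 * executing it; the *_tlbie_* helpers use tlbie, which is broadcast to
 * all processors and therefore needs the heavier completion sequence.
 */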
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
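	/*
	 * The ERAT (effective-to-real address translation cache) caches
	 * translations separately from the TLB, so invalidate it explicitly
	 * once every set has been flushed.
	 */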
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

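/*
 * Broadcast (tlbie) flushes: the leading ptesync makes prior PTE updates
 * visible before the invalidation, and the trailing eieio; tlbsync; ptesync
 * sequence waits for the invalidation to complete on all processors.
 */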
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* !CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
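/*
 * The SMP variants check mm_is_thread_local(): if the mm has only been
 * active on this CPU, a cheap local tlbiel flush is sufficient; otherwise
 * the invalidation has to be broadcast with tlbie.
 */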
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

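/*
 * Flushing the page walk cache here is deferred: setting need_flush_all
 * makes the final radix__tlb_flush() use a RIC_FLUSH_ALL flush, which
 * also clears the PWC.
 */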
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

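/*
 * Kernel translations are cached under PID 0, so a process-scoped
 * RIC_FLUSH_ALL of PID 0 covers any kernel range (start/end are ignored).
 */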
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else if (tlb->fullmm || tlb->need_flush_all) {
		tlb->need_flush_all = 0;
		radix__flush_all_mm(mm);
	} else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a bcast tlbie. Just a
 * number at this point copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	bool local;
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	local = mm_is_thread_local(mm);
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		asm volatile("ptesync": : :"memory");
		for (addr = start; addr < end; addr += page_size) {
			if (local)
				__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
			else
				__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
		}
		if (local)
			asm volatile("ptesync": : :"memory");
		else
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
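/*
 * After a THP collapse replaces a range of small PTEs with a single PMD
 * mapping, both the page walk cache entries for the old page table and
 * the small-page TLB entries covering the range must be invalidated.
 */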
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long ap = mmu_get_ap(mmu_virtual_psize);
	unsigned long pid, end;
	bool local;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	preempt_disable();
	local = mm_is_thread_local(mm);
	/* Otherwise first do the PWC */
	if (local)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	else
		_tlbie_pid(pid, RIC_FLUSH_PWC);

	/* Then iterate the pages */
	asm volatile("ptesync": : :"memory");
	end = addr + HPAGE_PMD_SIZE;
	for (; addr < end; addr += PAGE_SIZE) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}

	if (local)
		asm volatile("ptesync": : :"memory");
	else
		asm volatile("eieio; tlbsync; ptesync": : :"memory");

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

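/*
 * The two helpers below issue partition-scoped (prs = 0) invalidations
 * keyed by LPID, i.e. they flush guest translations rather than host
 * process-scoped ones.
 */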
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb,rs,prs,r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb,rs,prs,r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * First flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * Then flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */