/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

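/*
 * tlbiel invalidates translations on the issuing thread/core only, while
 * tlbie is broadcast to every processor on the fabric.  The RIC field
 * selects what gets invalidated: TLB entries (RIC_FLUSH_TLB), the page
 * walk cache (RIC_FLUSH_PWC), or both (RIC_FLUSH_ALL).
 */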
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
55
Aneesh Kumar K.V1a472c92016-04-29 23:26:05 +100056/*
57 * We use 128 set in radix mode and 256 set in hpt mode.
58 */
Aneesh Kumar K.V36194812016-06-08 19:55:50 +053059static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
Aneesh Kumar K.V1a472c92016-04-29 23:26:05 +100060{
61 int set;
62
Aneesh Kumar K.Vf7327e02017-04-01 20:11:48 +053063 asm volatile("ptesync": : :"memory");
Aneesh Kumar K.Va5998fc2017-04-26 21:38:17 +100064
65 /*
66 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
67 * also flush the entire Page Walk Cache.
68 */
69 __tlbiel_pid(pid, 0, ric);
70
Benjamin Herrenschmidt5ce5fe12017-07-19 14:49:04 +100071 /* For PWC, only one flush is needed */
72 if (ric == RIC_FLUSH_PWC) {
73 asm volatile("ptesync": : :"memory");
74 return;
75 }
Aneesh Kumar K.Va5998fc2017-04-26 21:38:17 +100076
Benjamin Herrenschmidt5ce5fe12017-07-19 14:49:04 +100077 /* For the remaining sets, just flush the TLB */
Aneesh Kumar K.Va5998fc2017-04-26 21:38:17 +100078 for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
Benjamin Herrenschmidt5ce5fe12017-07-19 14:49:04 +100079 __tlbiel_pid(pid, set, RIC_FLUSH_TLB);
Aneesh Kumar K.Va5998fc2017-04-26 21:38:17 +100080
Aneesh Kumar K.Vf7327e02017-04-01 20:11:48 +053081 asm volatile("ptesync": : :"memory");
Benjamin Herrenschmidt90c1e3c2017-02-06 13:05:16 +110082 asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
Aneesh Kumar K.V1a472c92016-04-29 23:26:05 +100083}
84
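/*
 * A broadcast tlbie must be fenced: the leading ptesync makes prior page
 * table stores visible to other threads before the invalidation, and the
 * trailing eieio; tlbsync; ptesync sequence waits until every processor
 * has completed the invalidation.
 */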
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_pid(pid, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

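/*
 * The double-underscore helpers above emit only the invalidate
 * instructions; the single-underscore variants wrap them in the required
 * synchronization.  The callers below choose between the local (tlbiel)
 * and global (tlbie) flavours depending on where the mm has run.
 */
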
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

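/*
 * Kernel mappings live under PID 0 on radix, so flushing a kernel range
 * simply invalidates everything for PID 0; start and end are unused.
 */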
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

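/*
 * Example: with 64K base pages, flushing a 2MB range takes 32 page-sized
 * tlbie operations, which stays under the default global ceiling of 33;
 * one more page and the whole PID is invalidated instead.
 */
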
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

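		/*
		 * Any 2MB-aligned subrange may be backed by transparent
		 * huge pages, whose TLB entries are 2M-sized; round the
		 * range inward to PMD boundaries and flush that subrange
		 * at 2M size as well.
		 */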
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize);

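/*
 * radix__flush_tlb_pwc() above does not flush anything directly; it only
 * sets tlb->need_flush_all, which steers this function to the variants
 * that also invalidate the page walk cache when the gather is flushed.
 */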
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	/*
	 * if page size is not something we understand, do a full mm flush
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		radix__flush_all_mm(mm);
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		unsigned long start = tlb->start;
		unsigned long end = tlb->end;

		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
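/*
 * After a huge page collapse, the small-page TLB entries covering the
 * PMD range are stale, as are the cached page walk entries, so flush
 * both for the 2MB region.
 */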
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

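/*
 * Partition-scoped flushes: invalidate guest translations for a given
 * LPID.  Here gpa is a guest (partition-scoped) address and rs carries
 * the LPID rather than a PID.
 */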
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb,rs,prs,r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb,rs,prs,r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

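/*
 * IS = 3 invalidates all translations regardless of PID or LPID.  Two
 * tlbies are issued: one with PRS = 1 and a non-zero LPID for guest
 * entries, then one with PRS = 0 and LPID = 0 for host entries.
 */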
void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}

613void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
614 unsigned long address)
615{
616 /*
617 * We track page size in pte only for DD1, So we can
618 * call this only on DD1.
619 */
620 if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
621 VM_WARN_ON(1);
622 return;
623 }
624
Aneesh Kumar K.Vddb014b2017-03-21 22:59:54 +0530625 if (old_pte & R_PAGE_LARGE)
Aneesh Kumar K.V6d3a0372016-11-28 11:47:01 +0530626 radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
627 else
628 radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
629}
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +1000630
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */