/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

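/*
 * RIC (Radix Invalidation Control) field values for tlbie/tlbiel, per
 * ISA 3.0: 0 invalidates only TLB entries, 1 invalidates only the
 * page-walk cache (PWC), and 2 invalidates the TLB, the PWC, and the
 * caching of process/partition table entries.
 */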
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
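
/*
 * Note on bit numbering (informational): PPC_BIT()/PPC_BITLSHIFT() use
 * IBM big-endian bit numbers, where bit 0 is the most-significant bit
 * of the 64-bit register.  Assuming the usual asm/bitops.h definitions
 * on 64-bit, they amount to:
 *
 *	#define PPC_BITLSHIFT(be)	(63 - (be))
 *	#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
 *
 * so PPC_BIT(53) above is 1UL << 10 (the IS field), and the set index
 * and PID land in the RB/RS fields that tlbiel expects.
 */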

/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
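
/*
 * Illustrative expansion (informational): POWER9_TLB_SETS_RADIX is 128,
 * so a local _tlbiel_pid(pid, RIC_FLUSH_TLB) issues one tlbiel per TLB
 * congruence class, sets 0 through 127, between the ptesync barriers,
 * and finishes with an ERAT flush.
 */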

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
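
/*
 * Note on ordering (informational): tlbie is broadcast to all CPUs in
 * the partition, so the global variants bracket it with ptesync before
 * and "eieio; tlbsync; ptesync" after, ordering the invalidation
 * against prior PTE updates and waiting for completion everywhere.
 * tlbiel is CPU-local and only needs the ptesync barriers.
 */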

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
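
/*
 * Illustrative usage (hypothetical, not called anywhere in this file):
 * after generic mm code has cleared a PTE for "addr" in "vma", the
 * stale translation is invalidated through the generic entry point,
 * which on radix lands in radix__flush_tlb_page() below.
 */
static inline void example_flush_one_page(struct vm_area_struct *vma,
					  unsigned long addr)
{
	flush_tlb_page(vma, addr);
}
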
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* !CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

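/*
 * Kernel translations are installed under PID 0, so a kernel-range
 * flush simply invalidates everything for PID 0; the start/end
 * arguments are ignored here.
 */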
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
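
/*
 * A minimal sketch (hypothetical helper, not used by the code below) of
 * how the two ceilings are applied: the flush routines switch to a
 * full-PID invalidation once a range covers more pages than the
 * relevant ceiling.
 */
static inline bool tlb_range_exceeds_ceiling(bool local, unsigned long end,
					     unsigned long nr_pages)
{
	if (end == TLB_FLUSH_ALL)
		return true;
	return nr_pages > (local ? tlb_local_single_page_flush_ceiling
				 : tlb_single_page_flush_ceiling);
}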

void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						  HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						 HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);
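
/*
 * Worked example of the PMD rounding above (illustrative numbers): with
 * HPAGE_PMD_SIZE = 2M, a range of start = 0x1ff000, end = 0x601000
 * gives hstart = 0x200000 (rounded up) and hend = 0x600000 (rounded
 * down), so only the 2M entries wholly inside the range are invalidated
 * with the 2M page-size encoding; the base page-size loop still walks
 * the entire [start, end) range.
 */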

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}
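
/*
 * For example (assuming a 64K base page size), 0x10000 maps to
 * mmu_virtual_psize, 0x200000 to MMU_PAGE_2M and 0x40000000 to
 * MMU_PAGE_1G; any other size returns -1, and callers such as
 * radix__tlb_flush() fall back to a full-mm flush.
 */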

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all) {
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	} else if (tlb->fullmm || tlb->need_flush_all) {
		tlb->need_flush_all = 0;
		radix__flush_all_mm(mm);
	} else {
		radix__flush_tlb_mm(mm);
	}
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
			nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize);
		else
			_tlbie_va_range(start, end, pid, page_size, psize);
	}
	preempt_enable();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize);
	} else {
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
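
/*
 * Rationale for radix__flush_tlb_collapsed_pmd() above (informational):
 * collapsing 4K PTEs into a single huge PMD frees the page-table page
 * those PTEs lived in, so the page-walk cache entry pointing at it must
 * be invalidated (RIC_FLUSH_PWC) along with the individual 4K
 * translations.
 */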

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the PTE only for POWER9 DD1, so this
	 * must only be called on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */