/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

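/*
 * The RIC values above follow the tlbie/tlbiel "Radix Invalidation
 * Control" field as we understand it from ISA 3.0: 0 invalidates TLB
 * entries only, 1 invalidates only the Page Walk Cache (PWC), and 2
 * invalidates everything for the target: TLB entries, the PWC, and
 * cached process/partition table entries.
 */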
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

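/*
 * _tlbie_pid() is the global counterpart of _tlbiel_pid(): tlbie is
 * broadcast on the interconnect and invalidates all matching entries
 * in one operation, so no per-set loop is needed, but it must be
 * fenced with eieio; tlbsync; ptesync to order against other CPUs.
 */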
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

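/*
 * For the VA-based invalidations below, rb carries the effective page
 * number (the low 12 bits are cleared) plus the actual-page-size (AP)
 * field at IBM bits 56:58 (ap << 5), and rs carries the PID in IBM
 * bits 0:31 (pid << 32). This matches our reading of the ISA 3.0
 * radix tlbie/tlbiel operand layout.
 */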
static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

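/*
 * Naming convention used above: the double-underscore __tlbiel_*() /
 * __tlbie_*() helpers emit only the raw invalidation instruction(s),
 * while the single-underscore _tlbiel_*() / _tlbie_*() wrappers add
 * the ptesync (and, for tlbie, eieio; tlbsync; ptesync) sequencing
 * required around them.
 */
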
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* !CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

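/*
 * The PWC flush is batched rather than issued here: setting
 * need_flush_all makes the eventual radix__tlb_flush() take the
 * radix__flush_all_mm() path (RIC_FLUSH_ALL), which casts out cached
 * page-walk entries for this PID along with the TLB entries.
 */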
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

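/*
 * Kernel mappings live under PID 0 in radix mode, so a kernel-range
 * flush simply invalidates everything for PID 0 rather than walking
 * the range page by page.
 */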
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather
 * than flush individual pages.
 *
 * tlbie goes out to the interconnect, so individual operations are
 * more costly than tlbiel. On the other hand, a full-PID tlbie does
 * not iterate over sets the way the local tlbiel variant does, so a
 * fairly low ceiling is used before switching from individual page
 * flushes to a full-PID flush.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

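/*
 * Worked example with the default ceiling of 33: a range flush
 * covering more than 33 base pages (over 132KB with 4K pages, or a
 * little over 2MB with 64K pages) collapses into a single full-PID
 * flush instead of a per-page invalidation loop.
 */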
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	local = mm_is_thread_local(mm);
	full = (end == TLB_FLUSH_ALL || nr_pages > tlb_single_page_flush_ceiling);

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

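		/*
		 * A range may contain both PTE-mapped pages and 2M THP
		 * mappings; round the range in to PMD boundaries and,
		 * if anything remains, also flush it at the 2M size.
		 */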
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						  HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						 HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

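/*
 * Example: radix_get_mmu_psize(SZ_2M) returns MMU_PAGE_2M, while an
 * unrecognized size returns -1, which makes radix__tlb_flush() below
 * fall back to a full-mm flush.
 */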
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all) {
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	} else if (tlb->fullmm || tlb->need_flush_all) {
		tlb->need_flush_all = 0;
		radix__flush_all_mm(mm);
	} else {
		radix__flush_tlb_mm(mm);
	}
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	local = mm_is_thread_local(mm);
	full = (end == TLB_FLUSH_ALL || nr_pages > tlb_single_page_flush_ceiling);

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize);
		else
			_tlbie_va_range(start, end, pid, page_size, psize);
	}
	preempt_enable();
}

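/*
 * When a huge PMD is collapsed, the page-table page that mapped the
 * range may still be referenced by the page-walk cache, so the PWC
 * must be flushed in addition to the TLB entries for the collapsed
 * range.
 */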
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize);
	} else {
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * First flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * Then flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the PTE only for POWER9 DD1, so
	 * this must only be called on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */