#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}
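
/*
 * Note on the two helpers above: 0xb98e is the opcode of IDTE
 * (invalidate DAT table entry). Bit 52 (value 2048) in the first
 * operand selects IDTE's clearing-by-ASCE operation, i.e. a TLB
 * purge for one address space instead of the invalidation of a
 * single table entry. The two variants differ only in the final
 * M4 field: 1 requests local clearing (TLB_LC facility), 0 clears
 * the entries on all CPUs.
 */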

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	csp(&dummy, 0, 0);
}
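
/*
 * csp() (see asm/pgtable.h) wraps COMPARE AND SWAP AND PURGE. The
 * compare-and-swap on the dummy word trivially succeeds (0 == 0,
 * new value 0); the instruction is used purely for its side effect,
 * a purge of the TLBs of all CPUs in the configuration.
 */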

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is
 * used this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
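
/*
 * With the local-clearing facility mm_cpumask() only ever grows, so
 * after a flush that covered all CPUs it can be shrunk back to the
 * set of CPUs the mm is currently attached to (cpu_attach_mask).
 * flush_count is raised for the duration of the flush so that code
 * elsewhere can detect an in-flight flush and must not rely on a
 * stable cpumask meanwhile.
 */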

/*
 * Flush TLB entries for a specific ASCE on all CPUs. Should never be
 * used when more than one ASCE (e.g. a gmap ASCE) has run on this mm.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	if (MACHINE_HAS_TLB_LC &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
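/* Without CONFIG_SMP there is only one CPU, so a local flush always suffices. */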
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(init_mm.context.asce);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, mm->context.asce);
	else
		__tlb_flush_full(mm);
}
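
/*
 * An mm with active gmaps (KVM guest mappings) runs with more than one
 * ASCE, so a single per-ASCE flush would miss the guest entries; only
 * in that case is the more expensive __tlb_flush_full() needed.
 */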

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}
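
/*
 * Deferred flushing, sketched: a pte primitive that can skip the
 * immediate flush (the mm has only one user) records it instead,
 *
 *	mm->context.flush_mm = 1;
 *
 * and the batched update ends with a single
 *
 *	__tlb_flush_mm_lazy(mm);
 *
 * which performs the flush only if one is actually pending.
 */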

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)
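
/*
 * flush_tlb(), flush_tlb_all() and flush_tlb_page() can be no-ops here
 * because the s390 pte/pmd primitives (ipte/idte based) invalidate the
 * TLB entry together with the page table entry, leaving nothing for a
 * separate per-page flush to do.
 */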

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */