#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
        asm volatile("ptlb" : : : "memory");
}
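
/*
 * Note: "ptlb" (purge TLB) invalidates the TLB of the issuing CPU only;
 * the "memory" clobber acts as a compiler barrier so that page table
 * updates are not reordered past the flush.
 */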

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
        /* Global TLB flush for the mm */
        asm volatile(
                "       .insn   rrf,0xb98e0000,0,%0,%1,0"
                : : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
        /* Local TLB flush for the mm */
        asm volatile(
                "       .insn   rrf,0xb98e0000,0,%0,%1,1"
                : : "a" (2048), "a" (asce) : "cc");
}
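
/*
 * Both IDTE helpers above emit the instruction as a raw .insn (opcode
 * 0xb98e, invalidate DAT table entry).  Judging from the two variants,
 * the last operand of the rrf encoding selects the scope: 0 broadcasts
 * the invalidation to all CPUs, 1 keeps it on the issuing CPU (which
 * requires the TLB_LC facility, cf. MACHINE_HAS_TLB_LC below).  The 2048
 * operand presumably selects the flush-by-ASCE form; see the Principles
 * of Operation for the exact operand semantics.
 */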

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
        register unsigned long reg2 asm("2");
        register unsigned long reg3 asm("3");
        register unsigned long reg4 asm("4");
        long dummy;

        dummy = 0;
        reg2 = reg3 = 0;
        reg4 = ((unsigned long) &dummy) + 1;
        asm volatile(
                "       csp     %0,%2"
                : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}
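
/*
 * CSP (compare and swap and purge) is used here purely for its purge
 * side effect: the compare value (reg2), the swap value (reg3) and the
 * dummy word are all zero, so the compare always succeeds, the store is
 * a no-op, and the TLBs of all CPUs in the configuration are purged.
 * The "+ 1" presumably sets a control bit in the low-order bits of the
 * operand address that requests the TLB purge.
 */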

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
        preempt_disable();
        atomic_add(0x10000, &mm->context.attach_count);
        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                /* Local TLB flush */
                __tlb_flush_local();
        } else {
                /* Global TLB flush */
                __tlb_flush_global();
                /* Reset TLB flush mask */
                if (MACHINE_HAS_TLB_LC)
                        cpumask_copy(mm_cpumask(mm),
                                     &mm->context.cpu_attach_mask);
        }
        atomic_sub(0x10000, &mm->context.attach_count);
        preempt_enable();
}
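
/*
 * attach_count appears to be split in two halves: the low 16 bits count
 * the CPUs that currently have the mm attached, while adding 0x10000
 * marks a flush in progress in the high bits so that concurrent attach
 * and detach can see it.  If only the current CPU is left in the mm
 * cpumask, a local flush suffices; otherwise the flush is broadcast and,
 * with the TLB_LC facility, the cpumask is shrunk back to the CPUs that
 * really have the mm attached (context.cpu_attach_mask).
 */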

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
        int active, count;

        preempt_disable();
        active = (mm == current->active_mm) ? 1 : 0;
        count = atomic_add_return(0x10000, &mm->context.attach_count);
        if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                __tlb_flush_idte_local(asce);
        } else {
                if (MACHINE_HAS_IDTE)
                        __tlb_flush_idte(asce);
                else
                        __tlb_flush_global();
                /* Reset TLB flush mask */
                if (MACHINE_HAS_TLB_LC)
                        cpumask_copy(mm_cpumask(mm),
                                     &mm->context.cpu_attach_mask);
        }
        atomic_sub(0x10000, &mm->context.attach_count);
        preempt_enable();
}
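
/*
 * The local IDTE shortcut above is taken only when the TLB_LC facility
 * is available, no other CPU has the mm attached (the low 16 bits of
 * attach_count do not exceed "active", i.e. at most the current CPU
 * itself), and the mm cpumask contains just the current CPU.  In every
 * other case the ASCE is invalidated globally, falling back to the full
 * CSP flush on machines without IDTE.
 */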

static inline void __tlb_flush_kernel(void)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(init_mm.context.asce);
        else
                __tlb_flush_global();
}
#else
#define __tlb_flush_global()    __tlb_flush_local()
#define __tlb_flush_full(mm)    __tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
        if (MACHINE_HAS_TLB_LC)
                __tlb_flush_idte_local(asce);
        else
                __tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
        if (MACHINE_HAS_TLB_LC)
                __tlb_flush_idte_local(init_mm.context.asce);
        else
                __tlb_flush_local();
}
#endif
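
/*
 * Without CONFIG_SMP there is only one CPU, so all flush primitives
 * above degenerate to the local variants.
 */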

static inline void __tlb_flush_mm(struct mm_struct * mm)
{
        /*
         * If the machine has IDTE we prefer to do a per-mm flush
         * on all cpus instead of doing a local flush, even if the
         * mm only ran on the local cpu.
         */
        if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
                __tlb_flush_asce(mm, mm->context.asce);
        else
                __tlb_flush_full(mm);
}
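
/*
 * The gmap_list check matters for KVM guests: once guest address spaces
 * (gmaps) are attached to the mm, more than one ASCE may reference its
 * page tables, so flushing the single mm ASCE is not enough and the full
 * flush path is taken instead (see the comment on __tlb_flush_full).
 */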

static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
        if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
                mm->context.flush_mm = 0;
        }
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()                             do { } while (0)
#define flush_tlb_all()                         do { } while (0)
#define flush_tlb_page(vma, addr)               do { } while (0)

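/*
 * A minimal usage sketch of the lazy scheme described above (hypothetical
 * caller, not part of this header).  The pte helpers can defer the
 * hardware flush (presumably by setting mm->context.flush_mm), and the
 * flush_tlb_range() call at the end of the update then performs a single
 * flush for the whole mm:
 *
 *      ptep_set_wrprotect(mm, addr, ptep);     // may defer the TLB flush
 *      ...                                     // more pte updates
 *      flush_tlb_range(vma, start, end);       // ends up in __tlb_flush_mm_lazy()
 */
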
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        __tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        __tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */