#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}
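
/*
 * Implementation note (a sketch of the encoding): both helpers above
 * emit the IDTE (invalidate DAT table entry) instruction, opcode 0xb98e,
 * through .insn; they differ only in the last field of the RRF encoding,
 * which selects a flush on all CPUs (0) or, with the TLB local-clearing
 * facility, a flush on the local CPU only (1).
 */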

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc");
}
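
/*
 * Rough sketch of what the csp-based flush does: csp (compare and swap
 * and purge) compares the even register of the pair (reg2, zero) with
 * the word at the operand address (dummy, also zero), so the swap
 * always succeeds, and the purge side effect of the instruction clears
 * the TLBs of all CPUs in the configuration.  The low-order bit added
 * to the operand address is an instruction-defined control bit; see the
 * Principles of Operation for the exact semantics.
 */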

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
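
/*
 * The cpumask check above is what makes the cheap path possible: if
 * mm_cpumask(mm) contains only the current CPU, no other CPU can hold
 * TLB entries for this mm, so a local ptlb is enough.  After a global
 * flush the mask is shrunk back to the CPUs that currently have the mm
 * attached (cpu_attach_mask), so later flushes can take the local path
 * again.
 */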

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
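
/*
 * attach_count is effectively a split counter: the low 16 bits count
 * the CPUs that currently have the address space attached, and adding
 * 0x10000 marks a flush in progress in the high bits.  The test
 * "(count & 0xffff) <= active" above therefore means "no CPU other
 * than (possibly) the current one has this mm attached", which,
 * combined with the cpumask check, makes the local IDTE safe.
 */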

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) init_mm.pgd |
				 init_mm.context.asce_bits);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
				       init_mm.context.asce_bits);
	else
		__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all CPUs instead of doing a local flush if the mm
	 * only ran on the local CPU.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}
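
/*
 * Rough usage sketch: the pte helpers typically set
 * mm->context.flush_mm instead of flushing immediately when they know
 * no other CPU has the mm attached; the flush is then carried out by
 * the next __tlb_flush_mm_lazy() call, e.g. from flush_tlb_mm() or
 * flush_tlb_range() below.
 */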

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
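
/*
 * Illustrative sequence (simplified; ptep_for() stands in for the real
 * pte lookup): a write-protect sweep batches the pte updates and issues
 * a single deferred flush at the end:
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		ptep_set_wrprotect(mm, addr, ptep_for(addr));
 *	flush_tlb_mm(mm);
 */
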
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
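
/*
 * Note that start and end are ignored here: there is no ranged kernel
 * flush in this header, the whole kernel ASCE is flushed (via IDTE or
 * a global flush) whenever a range of kernel mappings changes, e.g.
 * after vunmap.
 */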

#endif /* _S390_TLBFLUSH_H */