blob: 73cd85bebfb2ade78de40e577452ff86054bbb14 [file] [log] [blame]
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <asm/processor.h>
6
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLB entries
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
18
/*
 * S/390 has three ways of flushing TLBs:
 *  'ptlb' does a flush of the local processor only
 *  'csp'  flushes the TLBs on all PUs of an SMP system
 *  'ipte' invalidates a pte in a page table and flushes that entry out
 *         of the TLBs of all PUs of an SMP system
 */
26
/*
 * Purge the TLB of the local CPU only ('ptlb' instruction).  The
 * "memory" clobber keeps the compiler from carrying page-table derived
 * values across the flush.
 */
#define local_flush_tlb() \
	do { asm volatile("ptlb" : : : "memory"); } while (0)
29
#ifndef CONFIG_SMP

/*
 * We always need to flush, since s390 does not flush the TLB
 * on each context switch.
 */

/* Flush the current mm's TLB entries: a full local purge on UP. */
static inline void flush_tlb(void)
{
	local_flush_tlb();
}
/* Flush the TLB entries of every process: a full local purge on UP. */
static inline void flush_tlb_all(void)
{
	local_flush_tlb();
}
/* Flush the TLB entries of the given mm context (full local purge). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	local_flush_tlb();
}
/*
 * Flush the TLB entry for a single page.  A full local purge stands in
 * for a per-page flush here.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	local_flush_tlb();
}
/* Flush a range of user pages, implemented as a full local purge. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	local_flush_tlb();
}
59
/*
 * Flush a range of kernel pages; on UP a full local flush is the only
 * option, so start/end are intentionally unused.
 *
 * No trailing semicolon: the caller supplies it.  The old body
 * ("local_flush_tlb();") expanded to two statements, which breaks
 * unbraced if/else callers, and was inconsistent with the SMP variant.
 */
#define flush_tlb_kernel_range(start, end) local_flush_tlb()
62
#else

#include <asm/smp.h>

extern void smp_ptlb_all(void);
68
/*
 * Flush the TLBs on all CPUs of the system.
 *
 * On 31-bit machines without the CSP facility this falls back to
 * smp_ptlb_all(), which runs 'ptlb' on every CPU.  Otherwise a single
 * 'csp' (compare and swap and purge) with registers 2 and 3 zeroed
 * performs the system-wide purge.
 */
static inline void global_flush_tlb(void)
{
#ifndef __s390x__
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* __s390x__ */
	{
		/* csp wants its operand in an even/odd register pair;
		   register 4 is hard-bound here so the address survives
		   the explicit clobbers of registers 2 and 3. */
		register unsigned long addr asm("4");
		long dummy;

		dummy = 0;
		/* NOTE(review): low bit of the operand address is set
		   (&dummy + 1) — presumably the CSP "purge all" request;
		   confirm against the z/Architecture Principles of
		   Operation before touching this. */
		addr = ((unsigned long) &dummy) + 1;
		__asm__ __volatile__ (
			" slr 2,2\n"
			" slr 3,3\n"
			" csp 2,%0"
			: : "a" (addr), "m" (dummy) : "cc", "2", "3" );
	}
}
90
/*
 * We only have to do a global flush of the TLB if the process has run
 * on any PU other than the current one since the last flush.
 * If we have threads (mm->count > 1) we always do a global flush,
 * since the process may run on more than one processor at the same time.
 */
97
/*
 * Flush the TLB entries belonging to one mm context.
 *
 * Nothing is flushed if no CPU has ever run the mm (cpu_vm_mask empty).
 * With the IDTE facility the mm's entries are invalidated on all CPUs
 * in one instruction; otherwise a local flush suffices when only the
 * current CPU has used the mm, and any other mask forces a global flush.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
	cpumask_t local_cpumask;

	if (unlikely(cpus_empty(mm->cpu_vm_mask)))
		return;
	if (MACHINE_HAS_IDTE) {
		/* .insn rrf,0xb98e0000 encodes IDTE, keyed off the mm's
		   top-level page table (__pa(mm->pgd)).  NOTE(review):
		   the meaning of the 2048 operand is not visible here —
		   verify against the IDTE definition. */
		asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0"
			: : "a" (2048),
			"a" (__pa(mm->pgd)&PAGE_MASK) : "cc" );
		return;
	}
	/* Disable preemption so smp_processor_id() stays valid while we
	   compare cpu_vm_mask against the local CPU's mask. */
	preempt_disable();
	local_cpumask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
		local_flush_tlb();
	else
		global_flush_tlb();
	preempt_enable();
}
118
119static inline void flush_tlb(void)
120{
121 __flush_tlb_mm(current->mm);
122}
/* Flush the TLBs of every process on every CPU. */
static inline void flush_tlb_all(void)
{
	global_flush_tlb();
}
/* Flush the TLB entries belonging to one mm context. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
}
131static inline void flush_tlb_page(struct vm_area_struct *vma,
132 unsigned long addr)
133{
134 __flush_tlb_mm(vma->vm_mm);
135}
136static inline void flush_tlb_range(struct vm_area_struct *vma,
137 unsigned long start, unsigned long end)
138{
139 __flush_tlb_mm(vma->vm_mm);
140}
141
/* Kernel mappings are visible to all CPUs, so always flush globally;
   start/end are intentionally unused. */
#define flush_tlb_kernel_range(start, end) global_flush_tlb()
143
144#endif
145
/*
 * Called after page-table pages in [start, end) are torn down; a no-op
 * here because S/390 keeps no page-table caches in the TLB.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* S/390 does not keep any page table caches in TLB */
}
151
152#endif /* _S390_TLBFLUSH_H */