#ifndef _S390_TLBFLUSH_H
2#define _S390_TLBFLUSH_H
3
4#include <linux/config.h>
5#include <linux/mm.h>
6#include <asm/processor.h>
7
8/*
9 * TLB flushing:
10 *
11 * - flush_tlb() flushes the current mm struct TLBs
12 * - flush_tlb_all() flushes all processes TLBs
13 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
14 * - flush_tlb_page(vma, vmaddr) flushes one page
15 * - flush_tlb_range(vma, start, end) flushes a range of pages
16 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
17 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
18 */
19
20/*
21 * S/390 has three ways of flushing TLBs
22 * 'ptlb' does a flush of the local processor
23 * 'csp' flushes the TLBs on all PUs of a SMP
24 * 'ipte' invalidates a pte in a page table and flushes that out of
25 * the TLBs of all PUs of a SMP
26 */
27
/* Purge all TLB entries of the local CPU (the "ptlb" instruction). */
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
30
31#ifndef CONFIG_SMP
32
33/*
34 * We always need to flush, since s390 does not flush tlb
35 * on each context switch
36 */
37
/* UP: flush the current mm's TLB entries — purge the whole local TLB. */
static inline void flush_tlb(void)
{
	local_flush_tlb();
}
/* UP: flush all TLB entries on this (the only) processor. */
static inline void flush_tlb_all(void)
{
	local_flush_tlb();
}
/* UP: no per-mm selectivity here — purge the whole local TLB. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	local_flush_tlb();
}
/* UP: single-page flush falls back to purging the whole local TLB. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
	local_flush_tlb();
}
/* UP: range flush falls back to purging the whole local TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
	local_flush_tlb();
}
60
/*
 * UP: a kernel-range flush purges the entire local TLB; start/end are
 * intentionally unused.  Wrapped in do { } while (0) so the macro acts
 * as exactly one statement: the previous definition expanded with a
 * trailing semicolon, which breaks unbraced if/else callers
 * ("if (x) flush_tlb_kernel_range(a, b); else ..." would not compile
 * or would change control flow).
 */
#define flush_tlb_kernel_range(start, end) \
	do { local_flush_tlb(); } while (0)
63
64#else
65
66#include <asm/smp.h>
67
68extern void smp_ptlb_all(void);
69
/*
 * Flush the TLBs of all CPUs.
 *
 * On 31-bit machines without the CSP facility this is done by having
 * every CPU execute a local "ptlb" via smp_ptlb_all().  Otherwise a
 * single "csp" purges the TLBs machine-wide.
 */
static inline void global_flush_tlb(void)
{
#ifndef __s390x__
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* __s390x__ */
	{
		/*
		 * csp needs a second-operand address: register 4 is pinned
		 * to hold it, and registers 2/3 (zeroed by the two slr's)
		 * form the compare/swap pair.  The +1 sets the low address
		 * bit — presumably selecting the TLB-purge variant of csp;
		 * NOTE(review): confirm against the Principles of Operation.
		 */
		register unsigned long addr asm("4");
		long dummy;

		dummy = 0;
		addr = ((unsigned long) &dummy) + 1;
		__asm__ __volatile__ (
			" slr 2,2\n"
			" slr 3,3\n"
			" csp 2,%0"
			: : "a" (addr), "m" (dummy) : "cc", "2", "3" );
	}
}
91
92/*
93 * We only have to do global flush of tlb if process run since last
94 * flush on any other pu than current.
95 * If we have threads (mm->count > 1) we always do a global flush,
96 * since the process runs on more than one processor at the same time.
97 */
98
99static inline void __flush_tlb_mm(struct mm_struct * mm)
100{
101 cpumask_t local_cpumask;
102
103 if (unlikely(cpus_empty(mm->cpu_vm_mask)))
104 return;
105 if (MACHINE_HAS_IDTE) {
106 asm volatile (".insn rrf,0xb98e0000,0,%0,%1,0"
107 : : "a" (2048),
108 "a" (__pa(mm->pgd)&PAGE_MASK) : "cc" );
109 return;
110 }
111 preempt_disable();
112 local_cpumask = cpumask_of_cpu(smp_processor_id());
113 if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
114 local_flush_tlb();
115 else
116 global_flush_tlb();
117 preempt_enable();
118}
119
/* SMP: flush the current mm's TLB entries (local or global as needed). */
static inline void flush_tlb(void)
{
	__flush_tlb_mm(current->mm);
}
/* SMP: flush the TLBs of every CPU in the system. */
static inline void flush_tlb_all(void)
{
	global_flush_tlb();
}
/* SMP: flush the given mm's TLB entries (local or global as needed). */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
}
/* SMP: single-page flush falls back to flushing the whole vma's mm. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
	__flush_tlb_mm(vma->vm_mm);
}
/* SMP: range flush falls back to flushing the whole vma's mm. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
	__flush_tlb_mm(vma->vm_mm);
}
142
/* Kernel mappings are shared by all CPUs, so flush globally;
 * start/end are intentionally unused. */
#define flush_tlb_kernel_range(start, end) global_flush_tlb()
144
145#endif
146
/*
 * Flush cached page-table entries for a range.  A no-op here: per the
 * original note below, S/390 does not cache page-table entries in the
 * TLB, so there is nothing to invalidate.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* S/390 does not keep any page table caches in TLB */
}
152
153#endif /* _S390_TLBFLUSH_H */