#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
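
/*
 * A minimal sketch of the two step update described above (editor's
 * addition, not part of the original header). It assumes __ptep_ipte()
 * from <asm/pgtable.h> of this kernel generation, which issues IPTE for
 * a valid attached entry and marks the pte invalid; the function name
 * example_set_pte is hypothetical. A plain overwrite of a valid,
 * attached pte would violate the requirement quoted above.
 */
static inline void example_set_pte(unsigned long addr, pte_t *ptep,
				   pte_t entry)
{
	__ptep_ipte(addr, ptep);	/* step i: invalidate pte and TLB entry */
	*ptep = entry;			/* step ii: store the new pte */
}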

struct mmu_gather {
	struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch *batch;
#endif
	unsigned int fullmm;
	unsigned int need_flush;
};

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
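
/*
 * Worked example (editor's addition): on a 64-bit build with 4KB pages,
 * sizeof(struct mmu_table_batch) is 24 bytes (16 for the rcu_head, 4 for
 * nr, 4 of padding), so MAX_TABLE_BATCH evaluates to (4096 - 24) / 8 =
 * 509 table pointers per batch page.
 */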

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#endif

static inline void tlb_gather_mmu(struct mmu_gather *tlb,
				  struct mm_struct *mm,
				  unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->need_flush = 0;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
	if (tlb->fullmm)
		__tlb_flush_mm(mm);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	__tlb_flush_mm(tlb->mm);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);
}

/*
 * Release the page cache reference for a pte removed by
 * ptep_clear_flush. In both flush modes the TLB entry for a page cache
 * page has already been flushed, so just do free_page_and_swap_cache.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}
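
/*
 * Editor's sketch of the intended calling sequence, assuming a caller
 * shaped like the generic mm unmap path; example_unmap_one is a
 * hypothetical name, not a kernel function. A gather cycle brackets the
 * per-page teardown, and tlb_finish_mmu performs any pending flush.
 */
static inline void example_unmap_one(struct mm_struct *mm, struct page *page)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);	/* 0: not a full-mm teardown */
	/* ... invalidate the ptes mapping @page, then release it ... */
	tlb_remove_page(&tlb, page);
	tlb_finish_mmu(&tlb, 0, 0);	/* flushes the TLB if still needed */
}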

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	if (!tlb->fullmm)
		return page_table_free_rcu(tlb, (unsigned long *) pte);
#endif
	page_table_free(tlb->mm, (unsigned long *) pte);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	if (!tlb->fullmm)
		return tlb_remove_table(tlb, pmd);
#endif
	crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	if (!tlb->fullmm)
		return tlb_remove_table(tlb, pud);
#endif
	crst_table_free(tlb->mm, (unsigned long *) pud);
#endif
}
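
/*
 * Editor's note on the two thresholds above (worked out, not from the
 * original): 1UL << 31 = 2GB is the reach of a two level layout, where
 * the single segment (pmd) table doubles as the pgd; 1UL << 42 = 4TB is
 * the reach of a three level layout, where the region third (pud) table
 * doubles as the pgd. In either case the table is freed once, through
 * the pgd_free path, hence the early return.
 */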

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */