#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling. This allows us to remove pages from the page
 * tables and to flush the affected TLB entries efficiently.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;		/* tearing down the entire mm? */
	unsigned long		start, end;	/* range pending a TLB flush */
};
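
/*
 * A minimal sketch of how the core mm code drives this API when
 * tearing down a mapping (illustrative only; the real call sites,
 * e.g. unmap_region(), live in mm/ rather than in this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);		 flush caches for the vma
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	 widen the flush range
 *	tlb_remove_page(&tlb, page);		 page is freed immediately
 *	tlb_end_vma(&tlb, vma);			 flush the gathered range
 *	tlb_finish_mmu(&tlb, start, end);
 */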

/*
 * Reset the gathered range: empty for a partial unmap, or the whole
 * user address space when tearing down the entire mm.
 */
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	/* start == 0 and end == -1 signal a full address space teardown */
	tlb->fullmm = !(start | (end + 1));

	init_tlb_gather(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/*
 * Grow the pending flush range to cover the page mapped by this PTE.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

/*
 * For the vma handling we can optimise these away when doing a full
 * MM flush.  When we're doing a munmap, the vmas are adjusted to only
 * cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->end) {
		flush_tlb_range(vma, tlb->start, tlb->end);
		init_tlb_gather(tlb);
	}
}
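
/*
 * A sketch of the full-mm case the comment above refers to
 * (illustrative only): on process exit, exit_mmap() gathers with
 * start == 0 and end == -1, so fullmm is set, tlb_start_vma() and
 * tlb_end_vma() degenerate into no-ops, and a single flush_tlb_mm()
 * in tlb_finish_mmu() stands in for the per-vma ranged flushes:
 *
 *	tlb_gather_mmu(&tlb, mm, 0, -1);	 tlb.fullmm == 1
 *	... unmap and free every vma ...
 *	tlb_finish_mmu(&tlb, 0, -1);		 one flush_tlb_mm()
 */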

/*
 * Pages are freed immediately by __tlb_remove_page(), so there is
 * never anything deferred to drain here and these hooks are no-ops.
 */
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

/*
 * SH-4 and SH-5 can wire a TLB entry down so that it is never evicted;
 * parts without that facility must never reach these entry points.
 */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	BUG();
}

static inline void tlb_unwire_entry(void)
{
	BUG();
}
#endif
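
/*
 * A hypothetical sketch of the wire/unwire pairing (illustrative only;
 * the real callers live in the SH mm code):
 *
 *	tlb_wire_entry(vma, addr, pte);		 pin the translation
 *	... touch addr without risking a TLB miss ...
 *	tlb_unwire_entry();			 release the wired slot
 */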

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */