#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling. This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;		/* mm whose mappings are being torn down */
	unsigned int		fullmm;		/* non-zero when tearing down the entire mm */
	unsigned long		start, end;	/* virtual address range gathered so far */
};
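
/*
 * Roughly how the core mm code drives a gather when tearing down
 * mappings (a sketch of the calling sequence, not a verbatim caller;
 * see the users of tlb_gather_mmu() in mm/memory.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	tlb_start_vma(&tlb, vma);
 *	...	tlb_remove_tlb_entry(&tlb, ptep, addr) for each PTE,
 *		tlb_remove_page(&tlb, page) for each page ...
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */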

/*
 * Reset the gathered range: empty for a partial unmap, the whole
 * user address space when tearing down the entire mm.
 */
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/*
 * Widen the gathered range to cover the page at @address.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

/*
 * For VMA handling we can optimise these away when doing a full MM
 * flush. When we're doing a munmap, the vmas are adjusted to only
 * cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->end) {
		flush_tlb_range(vma, tlb->start, tlb->end);
		init_tlb_gather(tlb);
	}
}
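
/*
 * Illustrative effect of the optimisation above (hypothetical
 * addresses): if a munmap gathered PTEs at 0x400000 and 0x401000,
 * tlb_end_vma() issues flush_tlb_range(vma, 0x400000, 0x402000)
 * instead of flushing the whole mm, then re-arms the gather via
 * init_tlb_gather().
 */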

/*
 * Nothing to batch up here: __tlb_remove_page() frees pages
 * immediately, so there is never a deferred flush to perform.
 */
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)	do { } while (0)

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	BUG();
}

static inline void tlb_unwire_entry(void)
{
	BUG();
}
#endif
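
/*
 * Sketch of wired-entry usage (hypothetical caller): pin a translation
 * so the access below cannot take a TLB miss, then release it:
 *
 *	tlb_wire_entry(vma, addr, pte);
 *	... access addr ...
 *	tlb_unwire_entry();
 */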

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */