#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling. This allows us to remove pages from the page
 * tables and handle the resulting TLB invalidation efficiently.
 */
struct mmu_gather {
        struct mm_struct *mm;
        unsigned int fullmm;
        unsigned long start, end;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
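
/*
 * The gather itself is per-CPU state: tlb_gather_mmu() below takes it
 * with get_cpu_var(), which also disables preemption, and
 * tlb_finish_mmu() drops it again with put_cpu_var(). Only one gather
 * can therefore be live on a given CPU at a time.
 */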

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}
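
/*
 * Note the inverted sentinels above: an "empty" gather has
 * start > end (TASK_SIZE > 0), so the first tlb_remove_tlb_entry()
 * call below snaps both bounds onto the page being removed. A full-mm
 * gather instead pre-loads the maximal range, since the whole address
 * space is going away anyway.
 */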

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        init_tlb_gather(tlb);

        return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}
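
/*
 * A rough, illustrative sketch of how the generic unmap path drives
 * this API (the real sequence lives in mm/memory.c and mm/mmap.c):
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	tlb_start_vma(tlb, vma);
 *	...			zap PTEs, calling tlb_remove_tlb_entry()
 *				and tlb_remove_page() per page ...
 *	tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 */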

static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}
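
/*
 * Each call grows the pending flush range just enough to cover the
 * new page. Unmapping, say, three contiguous 4 KiB pages starting at
 * 0x401000 leaves start = 0x401000 and end = 0x404000, so
 * tlb_end_vma() can issue one ranged flush rather than three
 * single-page ones.
 */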

/*
 * For VMA handling we can optimise these hooks away when doing a
 * full MM flush. For a munmap, the VMAs are adjusted to cover only
 * the region being torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->end) {
                flush_tlb_range(vma, tlb->start, tlb->end);
                init_tlb_gather(tlb);
        }
}
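
/*
 * tlb_end_vma() thus flushes only the range that was actually
 * touched, and init_tlb_gather() re-arms the sentinels so the same
 * gather can be reused for the next VMA.
 */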

#define tlb_remove_page(tlb, page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)
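
/*
 * Note that there is no batching above: tlb_remove_page() and the
 * page-table variants hand pages straight back to the allocator
 * rather than queueing them in the gather, unlike the asm-generic
 * mmu_gather implementation.
 */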

#ifdef CONFIG_CPU_SH4
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#elif defined(CONFIG_SUPERH64)
static int dtlb_entry;
static unsigned long long dtlb_entries[64];

static inline void tlb_wire_entry(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
{
        unsigned long long entry;
        unsigned long paddr, flags;

        BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));

        local_irq_save(flags);

        /* Grab a wired slot and remember it for the matching unwire */
        entry = sh64_get_wired_dtlb_entry();
        dtlb_entries[dtlb_entry++] = entry;

        /* Hardware PTE flags, minus the page frame bits */
        paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
        paddr &= ~PAGE_MASK;

        sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);

        local_irq_restore(flags);
}
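
/*
 * Wired entries are stacked, so unwiring must happen in LIFO order.
 * A hypothetical caller, purely for illustration:
 *
 *	tlb_wire_entry(vma, addr, pte);
 *	...			addr can now be touched without
 *				risking a DTLB miss ...
 *	tlb_unwire_entry();
 */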

static inline void tlb_unwire_entry(void)
{
        unsigned long long entry;
        unsigned long flags;

        BUG_ON(!dtlb_entry);

        local_irq_save(flags);

        /*
         * Pop the most recently wired entry; dtlb_entry indexes one
         * past the last live slot, so pre-decrement.
         */
        entry = dtlb_entries[--dtlb_entry];

        sh64_teardown_tlb_slot(entry);
        sh64_put_wired_dtlb_entry(entry);

        local_irq_restore(flags);
}
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
{
        BUG();
}

static inline void tlb_unwire_entry(void)
{
        BUG();
}
#endif /* CONFIG_CPU_SH4 */

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

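/*
 * With no MMU there is nothing to shoot down; the no-op hooks above
 * are all that asm-generic/tlb.h needs to provide the rest of the
 * interface.
 */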
#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */