#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        unsigned long           start, end;
};
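
/*
 * The gather state above is deliberately minimal: mm is the address
 * space being torn down; fullmm is non-zero when the whole address
 * space is going away (e.g. on exit_mmap()), in which case the
 * per-range flushing below can be skipped; and [start, end) accumulates
 * the virtual range whose TLB entries still need to be flushed.
 */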
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 25 | |
Paul Mundt | c203518 | 2009-03-17 21:19:49 +0900 | [diff] [blame] | 26 | static inline void init_tlb_gather(struct mmu_gather *tlb) |
| 27 | { |
| 28 | tlb->start = TASK_SIZE; |
| 29 | tlb->end = 0; |
| 30 | |
| 31 | if (tlb->fullmm) { |
| 32 | tlb->start = 0; |
| 33 | tlb->end = TASK_SIZE; |
| 34 | } |
| 35 | } |
| 36 | |
Peter Zijlstra | 1e56a56 | 2011-05-24 17:11:54 -0700 | [diff] [blame] | 37 | static inline void |
| 38 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) |
Paul Mundt | c203518 | 2009-03-17 21:19:49 +0900 | [diff] [blame] | 39 | { |
Paul Mundt | c203518 | 2009-03-17 21:19:49 +0900 | [diff] [blame] | 40 | tlb->mm = mm; |
| 41 | tlb->fullmm = full_mm_flush; |
| 42 | |
| 43 | init_tlb_gather(tlb); |
Paul Mundt | c203518 | 2009-03-17 21:19:49 +0900 | [diff] [blame] | 44 | } |
| 45 | |
| 46 | static inline void |
| 47 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) |
| 48 | { |
| 49 | if (tlb->fullmm) |
| 50 | flush_tlb_mm(tlb->mm); |
| 51 | |
| 52 | /* keep the page table cache within bounds */ |
| 53 | check_pgt_cache(); |
Paul Mundt | c203518 | 2009-03-17 21:19:49 +0900 | [diff] [blame] | 54 | } |
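
/*
 * Roughly how the generic unmap path drives this API (a sketch only;
 * the exact call sites live in mm/memory.c and vary by kernel version):
 *
 *      struct mmu_gather tlb;
 *
 *      tlb_gather_mmu(&tlb, mm, 0);             /- 0: partial (munmap) flush
 *      tlb_start_vma(&tlb, vma);                /- write back cached user data
 *      ...
 *      tlb_remove_tlb_entry(&tlb, ptep, addr);  /- grow the pending flush range
 *      tlb_remove_page(&tlb, page);             /- release the backing page
 *      ...
 *      tlb_end_vma(&tlb, vma);                  /- flush the accumulated range
 *      tlb_finish_mmu(&tlb, start, end);        /- full-mm flush, if requested
 */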

static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->end) {
                flush_tlb_range(vma, tlb->start, tlb->end);
                init_tlb_gather(tlb);
        }
}
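
/*
 * Nothing is flushed while a vma is being torn down: tlb_remove_tlb_entry()
 * only widens [start, end), and tlb_end_vma() then issues a single
 * flush_tlb_range() over the whole span before resetting the gather so
 * the next vma starts with an empty range.
 */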

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        free_page_and_swap_cache(page);
        return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        __tlb_remove_page(tlb, page);
}
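
/*
 * sh frees pages eagerly instead of batching them: __tlb_remove_page()
 * drops the page (and any swap cache reference) on the spot, and the
 * constant return value of 1 tells callers the gather never fills up,
 * which is why tlb_flush_mmu() above has nothing to do.
 */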

#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)   pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)          do { } while (0)

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
{
        BUG();
}

static inline void tlb_unwire_entry(void)
{
        BUG();
}
#endif
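
/*
 * Wiring pins a translation into the TLB so it can't be evicted, and is
 * only implemented on SH-4 and SH-5 parts. A minimal usage sketch
 * (assuming the caller holds whatever locking it needs; vma, addr and
 * pte are placeholders here):
 *
 *      tlb_wire_entry(vma, addr, pte); /- addr -> pte stays TLB-resident
 *      ...touch addr without risk of a TLB miss...
 *      tlb_unwire_entry();             /- release the most recently wired entry
 */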

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)                         do { } while (0)
#define tlb_end_vma(tlb, vma)                           do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)       do { } while (0)
#define tlb_flush(tlb)                                  do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */