#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling. This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;
        unsigned long           start, end;
};

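/*
 * Reset the gather window to an "inverted" empty range (start above
 * end) so that the first tlb_remove_tlb_entry() shrinks it onto the
 * pages actually touched.  A full-mm teardown covers all of user
 * space from the start.
 */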
static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

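/*
 * !(start | (end + 1)) is non-zero only for start == 0, end == ~0UL,
 * which is what the generic mm code passes when tearing down an entire
 * address space.  Note that init_tlb_gather() immediately rewrites
 * start/end based on fullmm, so the range arguments only feed this test.
 */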
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
               unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->start = start;
        tlb->end = end;
        tlb->fullmm = !(start | (end + 1));

        init_tlb_gather(tlb);
}

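/*
 * For a full-mm teardown a single flush_tlb_mm() is cheaper than
 * flushing page by page; partial ranges have already been flushed
 * by tlb_end_vma().
 */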
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();
}

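/*
 * Widen the pending flush window to cover this PTE's page; the
 * actual TLB flush is deferred until tlb_end_vma().
 */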
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm && tlb->end) {
                flush_tlb_range(vma, tlb->start, tlb->end);
                init_tlb_gather(tlb);
        }
}

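/*
 * These hooks are no-ops here: sh flushes eagerly in tlb_end_vma()
 * and frees pages as they are removed, so there is never deferred
 * work left for a forced flush to complete.
 */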
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
}

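/*
 * Pages (and any associated swap cache entries) are freed immediately
 * rather than batched.  Returning 1 tells the caller there is still
 * room in the (non-existent) batch, so it never has to force a
 * tlb_flush_mmu().
 */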
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        free_page_and_swap_cache(page);
        return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        __tlb_remove_page(tlb, page);
}

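/*
 * Page-table pages are likewise freed straight back to the allocator;
 * the addr argument exists for architectures that defer these frees
 * and is unused here.
 */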
#define pte_free_tlb(tlb, ptep, addr)   pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)   pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)   pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)          do { } while (0)

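/*
 * SH-4 and SH-5 can pin ("wire") a TLB entry so that it is never
 * evicted; on parts without that facility any attempt to wire an
 * entry is a hard bug.
 */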
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t pte)
{
        BUG();
}

static inline void tlb_unwire_entry(void)
{
        BUG();
}
#endif

#else /* CONFIG_MMU */

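/*
 * Without an MMU there is nothing to flush; stub out the per-arch
 * hooks and let the generic mmu_gather code do the rest.
 */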
#define tlb_start_vma(tlb, vma)                         do { } while (0)
#define tlb_end_vma(tlb, vma)                           do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)       do { } while (0)
#define tlb_flush(tlb)                                  do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */