/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)	0
#else
#define tlb_fast_mode(tlb)	1
#endif
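
/*
 * In "fast" mode it is safe to free a page as soon as its mapping has
 * been removed, so __tlb_remove_page() below frees it immediately;
 * otherwise pages are batched in the gather structure and only freed by
 * tlb_flush_mmu() once the TLB entries have been invalidated.
 */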

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
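
/*
 * A minimal sketch of how core mm code drives this API.  This is
 * illustrative only and never compiled: the helper below and its
 * arguments are hypothetical, and real callers (e.g. the unmap paths
 * named in the comment above tlb_flush() below) do more work around
 * each step, such as clearing the pte first.
 */
#if 0
static void example_unmap_one(struct mm_struct *mm, struct vm_area_struct *vma,
			      pte_t *ptep, struct page *page, unsigned long addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);		/* fullmm = 0: partial unmap   */
	tlb_start_vma(&tlb, vma);		/* flush caches, remember vma  */
	tlb_remove_tlb_entry(&tlb, ptep, addr);	/* widen pending flush range   */
	tlb_remove_page(&tlb, page);		/* batch the page for freeing  */
	tlb_end_vma(&tlb, vma);			/* flush the accumulated range */
	tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
}
#endif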

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
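
/*
 * For example, with 4K pages, after tlb_add_flush(tlb, 0x8000) and
 * tlb_add_flush(tlb, 0xa000) the pending range is [0x8000, 0xb000): the
 * flush range is kept as one possibly over-wide span (here also covering
 * the untouched page at 0x9000) rather than a list of individual addresses.
 */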

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
		if (tlb->pages == tlb->local)
			__tlb_alloc_page(tlb);
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For TLB vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1; /* avoid calling tlb_flush_mmu */
	}

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}
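
/*
 * __tlb_remove_page() returns the number of free batch slots remaining
 * (or a non-zero dummy in fast mode), so a zero return tells the caller
 * that the batch is full: tlb_remove_page() then drains it with
 * tlb_flush_mmu() before any further pages can be gathered.
 */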
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif