#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER

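/*
 * Advance tlb->active to the next gather batch, allocating a new batch page
 * if none is queued. Returns false when the batch limit has been reached or
 * the allocation fails, i.e. no more pages can be gathered without a flush.
 */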
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

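/* Release all gathered pages (and their swap cache) and reset the batches. */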
static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

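/* Free the dynamically allocated batch pages themselves (not tlb->local). */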
static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

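/*
 * Queue a page for freeing at the next flush. Returns true when the active
 * batch is full and no further batch could be set up, in which case the
 * caller must flush before gathering more pages.
 */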
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}

#endif /* CONFIG_HAVE_MMU_GATHER_NO_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

/*
 * Invalidate the TLB here if tlb_remove_table() is expected to imply a TLB
 * invalidate.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

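/*
 * Free a single table page immediately, without batching; used when a batch
 * page could not be allocated.
 */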
static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

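/* RCU callback: free every table in the batch, then the batch page itself. */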
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

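/*
 * Invalidate the paging-structure caches if required and hand the current
 * table batch over to RCU; the tables are freed after a grace period.
 */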
static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

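/*
 * Queue a page-table page for deferred freeing. The batch is allocated on
 * first use; if that allocation fails, synchronize with concurrent walkers
 * and free the table immediately.
 */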
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

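/* Free everything that has been gathered so far: page tables and pages. */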
static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

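/* Flush the TLB for the gathered range, then free the batched pages and tables. */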
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm = !(start | (end+1));

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and stale TLB entries.  So flush the TLB forcefully if we detect
	 * parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which TLB level to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * compared to fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}