/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
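
/*
 * Illustrative sketch only (the real implementation lives in mm/memory.c;
 * this is a simplified, hypothetical version of it): tlb_remove_table()
 * queues a directory page into the batch and, once the batch is full,
 * hands the whole batch to RCU; if the batch page cannot be allocated it
 * falls back to freeing the single table directly:
 *
 *	void tlb_remove_table(struct mmu_gather *tlb, void *table)
 *	{
 *		struct mmu_table_batch **batch = &tlb->batch;
 *
 *		if (*batch == NULL) {
 *			*batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 *			if (*batch == NULL) {
 *				tlb_remove_table_one(table);	// no storage: free one table
 *				return;
 *			}
 *			(*batch)->nr = 0;
 *		}
 *		(*batch)->tables[(*batch)->nr++] = table;
 *		if ((*batch)->nr == MAX_TABLE_BATCH)
 *			tlb_table_flush(tlb);	// RCU-free the full batch
 *	}
 */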

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
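
/*
 * Worked example (assuming 4K pages and a 64-bit build where
 * sizeof(struct mmu_gather_batch) is 16 bytes): MAX_GATHER_BATCH is
 * (4096 - 16) / 8 = 510 pages per batch, so MAX_GATHER_BATCH_COUNT is
 * 10000 / 510 = 19 batches, i.e. roughly 10K pages freed between
 * forced flushes.
 */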

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int			page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
		    unsigned long end);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
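
/*
 * Typical call sequence, sketched (illustrative only; the real callers are
 * the unmap paths in mm/memory.c and mm/mmap.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	// ... walk the page tables, clearing ptes and handing the
 *	// underlying pages to tlb_remove_page() ...
 *	tlb_finish_mmu(&tlb, start, end);	// flush TLBs, free the pages
 *
 * tlb_flush_mmu() can additionally be called mid-walk to force a flush,
 * e.g. when a batch fills up.
 */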

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}
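
/*
 * Example of the range tracking (illustrative): after __tlb_reset_range()
 * on a non-fullmm gather, start == TASK_SIZE and end == 0, i.e. an empty
 * range. A later __tlb_adjust_range(tlb, addr, PAGE_SIZE) for a user
 * address then gives start == addr, end == addr + PAGE_SIZE, and further
 * calls only ever grow the range; "tlb->end != 0" is therefore the cheap
 * "is there anything to flush?" test used below.
 */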

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/*
 * tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
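
/*
 * The two variants differ in who handles a full batch (illustrative sketch
 * of a hypothetical zap loop, modelled on zap_pte_range()):
 *
 *	// __tlb_remove_page(): the caller checks the return value ...
 *	if (__tlb_remove_page(tlb, page)) {
 *		// batch full: drop the pte lock, then
 *		tlb_flush_mmu(tlb);
 *		// ... and restart the walk at this address
 *	}
 *
 *	// tlb_remove_page(): flushes internally, nothing for the caller to do
 *	tlb_remove_page(tlb, page);
 */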

#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about the page size change itself; just update the
	 * mmu_gather page size here so that the debug checks don't throw a
	 * false warning.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
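
/*
 * Sketch of where the vma hooks sit in an unmap walk (illustrative only;
 * the real loop is unmap_page_range() in mm/memory.c):
 *
 *	tlb_start_vma(tlb, vma);
 *	// ... zap the ptes covered by this vma, growing tlb->start/end ...
 *	tlb_end_vma(tlb, vma);	// flush what was gathered, reset the range
 *
 * On a fullmm teardown both hooks are no-ops here and the flush done by
 * tlb_finish_mmu() covers everything.
 */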

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
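
/*
 * Illustrative only: a pte zap typically pairs the hardware pte clear with
 * this bookkeeping, e.g. (hypothetical fragment)
 *
 *	ptent = ptep_get_and_clear_full(mm, address, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, address);
 *
 * so that the eventual tlb_flush() covers 'address' without each caller
 * issuing a per-page flush_tlb_page().
 */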

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it.  This
 * is definitely how x86 works, for example.  And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion.
 *
 * So if we ever find an architecture that would want something that odd,
 * I think it is up to that architecture to do its own odd thing, not cause
 * pain for others.
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
 */

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
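
/*
 * Illustrative only: freeing a whole page-table page goes through the same
 * gather, so the directory page outlives any TLB entry (and, with
 * CONFIG_HAVE_RCU_TABLE_FREE, any lockless walker) that might still
 * reference it. Roughly as in free_pte_range() in mm/memory.c:
 *
 *	pgtable_t token = pmd_pgtable(*pmd);
 *	pmd_clear(pmd);
 *	pte_free_tlb(tlb, token, addr);
 */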

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */