/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
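
/*
 * Illustrative sketch, not part of this header: an architecture that selects
 * CONFIG_HAVE_RCU_TABLE_FREE is expected to hand a no-longer-used page-table
 * page to tlb_remove_table() instead of freeing it directly, so the free is
 * deferred past a sched-RCU grace period. The helper name below and the exact
 * cookie passed are hypothetical; each architecture's __tlb_remove_table()
 * callback decides how the batched pointer is eventually freed.
 *
 *	static inline void arch_free_pte_table(struct mmu_gather *tlb,
 *					       pgtable_t pte)
 *	{
 *		// queued in tlb->batch and RCU-freed, or freed one at a time
 *		// via tlb_remove_table_one() if the batch cannot be allocated
 *		tlb_remove_table(tlb, pte);
 *	}
 */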

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
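
/*
 * Rough worked example (assuming a 64-bit kernel with 4 KiB pages, not a
 * guaranteed value): struct mmu_gather_batch is 16 bytes, so MAX_GATHER_BATCH
 * comes out to (4096 - 16) / 8 = 510 page pointers per batch, and
 * MAX_GATHER_BATCH_COUNT to 10000 / 510 = 19 batches, i.e. just under 10K
 * pages can be queued before the gather stops growing and the caller must
 * call tlb_flush_mmu().
 */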

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
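
/*
 * Condensed caller-side sketch (loosely modelled on the unmap paths in
 * mm/memory.c; the variable names here are illustrative, not from this
 * header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	// start a gather for [start, end)
 *	...					// clear PTEs, then per page:
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	// record the unmapped address
 *	tlb_remove_page(&tlb, page);		// queue the page, flush when full
 *	...
 *	tlb_finish_mmu(&tlb, start, end);	// final TLB flush + free queued pages
 */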

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + PAGE_SIZE);
}

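/*
 * Reset the tracked flush range. For a full-mm teardown the range is pinned
 * to "everything" (~0 keeps tlb->end non-zero so the final flush is not
 * skipped); otherwise it starts out empty (start > end) and
 * __tlb_adjust_range() grows it again as entries are recorded.
 */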
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif
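
/*
 * Illustrative only: an architecture that needs per-VMA work around the unmap
 * can provide its own tlb_start_vma()/tlb_end_vma(). A hypothetical override
 * that flushes the caches covering the VMA before its mappings are torn down
 * might look like:
 *
 *	#define tlb_start_vma(tlb, vma)					\
 *		do {							\
 *			if (!(tlb)->fullmm)				\
 *				flush_cache_range(vma, (vma)->vm_start,	\
 *						  (vma)->vm_end);	\
 *		} while (0)
 */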

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
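
/*
 * Condensed sketch of how tlb_remove_tlb_entry() pairs with tlb_remove_page()
 * in a PTE teardown loop (loosely modelled on zap_pte_range() in mm/memory.c;
 * non-present entries, special mappings and swap entries are omitted, and the
 * page lookup is simplified):
 *
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	for (; addr != end; pte++, addr += PAGE_SIZE) {
 *		pte_t ptent = ptep_get_and_clear_full(mm, addr, pte,
 *						      tlb->fullmm);
 *		struct page *page = pte_page(ptent);	// assume a normal, present page
 *
 *		tlb_remove_tlb_entry(tlb, pte, addr);	// grow tlb->start..tlb->end
 *		tlb_remove_page(tlb, page);		// queue page, flush if batch is full
 *	}
 */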
184
Shaohua Lif21760b2012-01-12 17:19:16 -0800185/**
186 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
187 * This is a nop so far, because only x86 needs it.
188 */
189#ifndef __tlb_remove_pmd_tlb_entry
190#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
191#endif
192
193#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
194 do { \
Will Deaconfb7332a2014-10-29 10:03:09 +0000195 __tlb_adjust_range(tlb, address); \
Shaohua Lif21760b2012-01-12 17:19:16 -0800196 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
197 } while (0)

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
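
/*
 * Illustrative only: the __pte_free_tlb()/__pmd_free_tlb()/__pud_free_tlb()
 * hooks come from the architecture and decide how a discarded page-table page
 * is released once the gather is done with it. On an architecture where
 * pgtable_t is a struct page pointer, a hypothetical implementation could be
 * as simple as:
 *
 *	#define __pte_free_tlb(tlb, pte, address)		\
 *		do {						\
 *			pgtable_page_dtor(pte);			\
 *			tlb_remove_page((tlb), (pte));		\
 *		} while (0)
 */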

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */