/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

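/*
 * Number of page pointers the gather can batch in the on-stack array
 * (tlb->local) before __tlb_alloc_page() tries to upgrade to a whole
 * page, which holds PAGE_SIZE / sizeof(struct page *) entries.
 */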
#define MMU_GATHER_BUNDLE	8

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
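/*
 * Callback invoked via tlb_remove_table() once an RCU grace period has
 * elapsed: by then no CPU can still be walking the old page-table page,
 * so it is safe to free it (and any associated swap cache).
 */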
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
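/*
 * Typical sequence for case 1 above, as driven by the generic mm code
 * (a sketch, not a literal call site):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	...
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	// for each unmapped pte
 *	...
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */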
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

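/*
 * Widen the pending flush range to cover the page at @addr.  For a
 * full-mm teardown there is nothing to track, since tlb_flush() will
 * use flush_tlb_mm() anyway.
 */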
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

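/*
 * Try to upgrade from the on-stack bundle to a full page of page
 * pointers.  The allocation may fail under memory pressure (GFP_NOWAIT);
 * in that case we simply keep batching MMU_GATHER_BUNDLE pages at a time.
 */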
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

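/*
 * Flushing is split in two phases: tlb_flush_mmu_tlbonly() invalidates
 * the TLB entries (and queued page-table pages, if RCU table freeing is
 * enabled), while tlb_flush_mmu_free() releases the batched pages.  The
 * TLB must be flushed before the pages are freed, so that no CPU can
 * still be using a mapping of a page that is being reused.
 */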
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

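/*
 * start == 0 && end == -1 means the whole address space is going away
 * (exit_mmap()), hence the !(start | (end + 1)) test for fullmm below.
 */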
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}

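/*
 * Finish the gather: do the final TLB flush and page free, trim the
 * page-table cache, and release the pointer page if we managed to
 * allocate one in __tlb_alloc_page().
 */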
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

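/*
 * Queue a page for freeing.  Returns true if the batch is full and the
 * caller must call tlb_flush_mmu() before retrying; tlb_remove_page()
 * below wraps exactly that retry.
 */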
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb->nr == tlb->max)
		return true;
	tlb->pages[tlb->nr++] = page;
	return false;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page)) {
		tlb_flush_mmu(tlb);
		__tlb_remove_page(tlb, page);
	}
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

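/*
 * Free a pte page table, making sure the TLB entries covering it are
 * flushed first.  The extent that needs flushing depends on the page
 * table format, as noted below.
 */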
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_entry(tlb, pte);
}

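/*
 * Only LPAE has a separate pmd level backed by its own pages; with the
 * classic two-level ARM MMU the pmd is folded into the pgd, so there is
 * nothing to free here.
 */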
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif