/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;            /* address space being torn down */
        unsigned int            fullmm;         /* non-zero for a full-mm teardown */
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
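
/*
 * The gather state is per-cpu: tlb_gather_mmu() takes the calling
 * CPU's instance via get_cpu_var(), which also disables preemption;
 * preemption is re-enabled only when tlb_finish_mmu() hands the
 * state back via put_cpu_var().
 */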

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}
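
/*
 * Usage sketch, full-mm case (modelled on exit_mmap() in mm/mmap.c):
 * with full_mm_flush set, the per-vma flushes below are skipped and
 * tlb_finish_mmu() issues one flush_tlb_mm() for the whole address
 * space.  The function below is illustrative only, not part of this
 * API.
 */
#if 0
static void example_full_mm_teardown(struct mm_struct *mm)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 1);

        /* ... unmap all vmas and free the mm's pages here ... */

        tlb_finish_mmu(tlb, 0, TASK_SIZE);      /* one whole-mm TLB flush */
}
#endif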

/*
 * Single-entry invalidation is deliberately a no-op here; see the
 * StrongARM note at the top of this file.
 */
#define tlb_remove_tlb_entry(tlb,ptep,address)  do { } while (0)

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Unlike the asm-generic mmu_gather, there is no batching here:
 * pages and page tables are freed immediately.
 */
#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep)         pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp)         pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)          do { } while (0)

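/*
 * Usage sketch, partial-unmap case (modelled on the generic
 * unmap_region() path): each vma is bracketed by tlb_start_vma()
 * and tlb_end_vma(), so only the range being torn down is flushed
 * from the caches and the TLB.  The function and parameters below
 * are illustrative only.
 */
#if 0
static void example_unmap_one(struct mm_struct *mm,
                              struct vm_area_struct *vma,
                              pte_t *ptep, struct page *page,
                              unsigned long addr)
{
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

        tlb_start_vma(tlb, vma);                /* flush caches for the range */
        tlb_remove_tlb_entry(tlb, ptep, addr);  /* no-op on ARM, see above */
        tlb_remove_page(tlb, page);             /* freed immediately, no batching */
        tlb_end_vma(tlb, vma);                  /* flush the TLB for the range */

        tlb_finish_mmu(tlb, vma->vm_start, vma->vm_end);
}
#endif
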
#endif  /* CONFIG_MMU */
#endif