/* asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c  Copyright Linus Torvalds and others.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/config.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much..
 */
#ifdef CONFIG_SMP
  #define FREE_PTE_NR	506
  #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
  #define FREE_PTE_NR	1
  #define tlb_fast_mode(tlb) 1
#endif

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.  This structure
 * can be per-CPU or per-MM as the page table lock is held for the duration of
 * TLB shootdown.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* set to ~0U means fast mode */
	unsigned int		need_flush;	/* Really unmapped some ptes? */
	unsigned int		fullmm;		/* non-zero means full mm flush */
	unsigned long		freed;
	struct page		*pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
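
/*
 * A minimal sketch of the arch side (an assumption for illustration, not
 * part of this header): each architecture using the generic shootdown code
 * defines the per-CPU storage declared above somewhere in its mm setup
 * code, e.g.
 *
 *	DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 */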

/* tlb_gather_mmu
 *	Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &per_cpu(mmu_gathers, smp_processor_id());

	tlb->mm = mm;

	/* Use fast mode if only one CPU is online */
	tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;

	tlb->fullmm = full_mm_flush;
	tlb->freed = 0;

	return tlb;
}
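
/*
 * A hedged usage sketch (vma, start and end are illustrative names, not
 * defined in this header): mm code brackets an unmap operation with
 * tlb_gather_mmu()/tlb_finish_mmu() while holding the page table lock,
 * roughly:
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(vma->vm_mm, 0);
 *
 *	... walk the page tables, calling tlb_remove_tlb_entry() and
 *	tlb_remove_page() for each mapping being torn down ...
 *
 *	tlb_finish_mmu(tlb, start, end);
 */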

static inline void
tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}
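
/*
 * tlb_flush() is supplied by the architecture's <asm/tlb.h> before this
 * header is included.  A hedged sketch of one common definition (assuming
 * the arch is content with a whole-mm flush):
 *
 *	#define tlb_flush(tlb)	flush_tlb_mm((tlb)->mm)
 */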

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.  The page table lock is still held at this point.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	int freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	int rss = get_mm_counter(mm, rss);

	/* Clamp so the rss counter can never go negative. */
	if (rss < freed)
		freed = rss;
	add_mm_counter(mm, rss, -freed);
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

static inline unsigned int
tlb_is_full_mm(struct mmu_gather *tlb)
{
	return tlb->fullmm;
}

/* tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}
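
/*
 * Note on the two modes above: in fast mode (UP, or only one CPU online)
 * the page can be freed immediately, since no other CPU can hold a stale
 * TLB entry for it.  In SMP mode pages are batched and freed only after
 * tlb_flush() in tlb_flush_mmu(), so a remote CPU can never reach a freed
 * page through a stale TLB entry.
 */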

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate.  This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
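
/*
 * __tlb_remove_tlb_entry() is the arch hook behind the macro above: it lets
 * an architecture note the address of each unmapped pte for a later ranged
 * flush.  A hedged sketch for an arch with no such need (an assumption, not
 * mandated here) is simply a no-op:
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */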

#define pte_free_tlb(tlb, ptep)					\
	do {							\
		tlb->need_flush = 1;				\
		__pte_free_tlb(tlb, ptep);			\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pud_free_tlb(tlb, pudp);			\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp)					\
	do {							\
		tlb->need_flush = 1;				\
		__pmd_free_tlb(tlb, pmdp);			\
	} while (0)
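
/*
 * The __pte_free_tlb()/__pud_free_tlb()/__pmd_free_tlb() hooks used above
 * are likewise arch-provided.  A hedged sketch of one common choice (an
 * assumption, not required by this header) is to push the freed page table
 * page through the same batching as ordinary pages:
 *
 *	#define __pte_free_tlb(tlb, pte)	tlb_remove_page((tlb), (pte))
 */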

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */