#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
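/*
 * Illustrative only (not part of this header): a caller following the template
 * above might look roughly like the sketch below.  unmap_one_vma() and
 * lookup_pte() are hypothetical stand-ins for the real unmap logic in
 * mm/memory.c, which also handles locking and non-present PTEs.
 *
 *	static void
 *	unmap_one_vma (struct mm_struct *mm, struct vm_area_struct *vma,
 *		       unsigned long start, unsigned long end)
 *	{
 *		struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	// not a full-mm flush
 *		unsigned long addr;
 *
 *		tlb_start_vma(tlb, vma);
 *		for (addr = start; addr < end; addr += PAGE_SIZE) {
 *			pte_t *pte = lookup_pte(mm, addr);	// hypothetical helper
 *			struct page *page;
 *
 *			if (!pte || !pte_present(*pte))
 *				continue;
 *			page = pte_page(*pte);
 *			// ...clear the PTE here (step (2) above)...
 *			tlb_remove_tlb_entry(tlb, pte, addr);
 *			tlb_remove_page(tlb, page);
 *		}
 *		tlb_end_vma(tlb, vma);
 *		tlb_finish_mmu(tlb, start, end);
 *	}
 */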
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

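/*
 * On SMP, freed pages are batched (up to FREE_PTE_NR of them) so that they are
 * only released after the TLB has been flushed.  On UP there is no shootdown
 * race, so pages can be freed immediately ("fast mode", flagged by nr == ~0U).
 */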
#ifdef CONFIG_SMP
# define FREE_PTE_NR		2048
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR		0
# define tlb_fast_mode(tlb)	(1)
#endif

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		*pages[FREE_PTE_NR];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];

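/*
 * Illustrative sketch (the mask value and error convention are assumptions,
 * not taken from this header): pin a translation in both the instruction and
 * data TRs and purge it again later.  A target_mask of 0x3 is presumed to
 * mean "itr and dtr"; a negative return means no TR slot was available.
 *
 *	int slot = ia64_itr_entry(0x3, vaddr, pteval, log_page_size);
 *	if (slot < 0)
 *		return slot;
 *	...
 *	ia64_ptr_entry(0x3, slot);
 */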
/*
 * Region register macros
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)

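/*
 * Example (illustrative, assuming the ia64_get_rr() accessor): decompose a
 * region register value with the macros above and rebuild one from fields.
 *
 *	u64 rr    = ia64_get_rr(addr);	// current region register value
 *	u64 rid   = RR_TO_RID(rr);	// 24-bit region ID (bits 8..31)
 *	u64 ps    = RR_TO_PS(rr);	// preferred page size, log2 (bits 2..7)
 *	u64 newrr = RR_PS(ps) | RR_VE(1) | (rid << 8);
 */
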
/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}

/*
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
	 * doesn't work because of speculative accesses and software prefetching: the page
	 * table of "mm" may be (and usually is) the currently active page table and even
	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
	 * compiler might use speculation or lfetch.fault on what happens to be a valid
	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
	 * problems.  (We could make fast-mode work by switching the current task to a
	 * different "mm" during the shootdown.) --davidm 08/02/2002
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
	return tlb;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= FREE_PTE_NR)
		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep)				\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep);			\
} while (0)

#define pmd_free_tlb(tlb, ptep)				\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep);			\
} while (0)

#define pud_free_tlb(tlb, pudp)				\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp);			\
} while (0)

#endif /* _ASM_IA64_TLB_H */