/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

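/* Flush every virtual address queued in this CPU's batch.  The TSB
 * entries are flushed first, then, if the mm still has a valid
 * hardware context, the TLB entries are shot down (cross-calling
 * the other CPUs on SMP).
 */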
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

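/* Queue one (mm, vaddr) pair for a deferred TLB flush.  Bit 0 of
 * the page-aligned address records whether the mapping was
 * executable.  A batch only ever holds entries for a single mm:
 * a different mm or a full batch forces a drain, and outside of
 * lazy MMU mode the page is flushed immediately instead.
 */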
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

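/* Tear down one translation.  On pre-hypervisor chips with a
 * virtually-indexed D-cache, a dirty file page whose user mapping
 * differs from the kernel mapping in bit 13 may alias in the cache,
 * so it is flushed from the D-cache first.  The address is then
 * queued for a TLB flush unless the whole mm is being torn down,
 * in which case the full-mm flush covers it.
 */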
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
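/* The old PMD pointed at a page table of regular PTEs; walk the
 * whole huge-page range and queue a flush for every valid entry.
 */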
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

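/* Install a new PMD.  When a mapping changes between huge and
 * non-huge, the per-mm count of huge PTEs is updated, and any
 * translations still visible under the old PMD are queued for
 * flushing.  A huge PMD spans two REAL_HPAGE_SIZE regions, hence
 * the pair of flushes below.
 */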
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		pte_t orig_pte = __pte(pmd_val(orig));
		bool exec = pte_exec(orig_pte);

		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			tlb_batch_add_one(mm, addr, exec);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig, exec);
		}
	}
}

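/* Deposit/withdraw stash preallocated page tables behind a huge
 * PMD so that a later split can reuse them without allocating.
 * The stash is threaded through the pgtable pages themselves,
 * anchored at pmd_huge_pte(), under mm->page_table_lock.
 */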
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */