/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

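/*
 * The per-cpu batch is armed by arch_enter_lazy_mmu_mode() and disarmed
 * by arch_leave_lazy_mmu_mode(). A sketch of the hash64 enter side from
 * <asm/tlbflush.h>, for orientation (details may differ by version):
 *
 *	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 *
 *	batch->active = 1;
 *
 * While a batch is active, hpte_need_flush() below queues invalidations
 * into it instead of issuing them one at a time.
 */
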
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment, like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not be reused) at the time
	 * of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

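/*
 * Typical caller, sketched after pte_update() in <asm/pgtable-ppc64.h>:
 * the Linux PTE is changed atomically first, and the old value tells us
 * whether a stale hash entry needs to be flushed:
 *
 *	old = atomically clear the requested bits in *ptep;
 *	if (old & _PAGE_HASHPTE)
 *		hpte_need_flush(mm, addr, ptep, old, huge);
 */
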
/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

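/*
 * A batch is normally drained when leaving lazy MMU mode; a sketch of
 * the hash64 arch_leave_lazy_mmu_mode() from <asm/tlbflush.h>:
 *
 *	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 *
 *	if (batch->index)
 *		__flush_tlb_pending(batch);
 *	batch->active = 0;
 *
 * tlb_flush() below is this architecture's mmu_gather hook: the generic
 * unmap path calls it before the gathered pages are actually freed.
 */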
void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		/* Dereference the page table slot (ptep), not the PTE
		 * value itself, when checking for a huge PMD. */
		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)ptep)))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep);
		else
			hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

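/*
 * flush_tlb_pmd_range - Flush the hash entries for every Linux PTE below
 * one PMD page table, while leaving the PTEs themselves intact. The same
 * batch-outside-of-PTE-lock caveat as in __flush_hash_table_range() above
 * applies here.
 */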
void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);

		if (pteval & _PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}