/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *     Copyright (C) 1996 Paul Mackerras
 *
 *   Derived from "arch/i386/mm/init.c"
 *     Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *   Dave Engebretsen <engebret@us.ibm.com>
 *     Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * arch/powerpc/include/asm/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
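/*
 * Rough sanity check on that sizing, assuming a 64-bit kernel with 4K
 * pages and sizeof(pgtable_free_t) == sizeof(unsigned long): the batch
 * header (rcu_head plus index, with padding) occupies about 24 bytes,
 * so one page batches roughly (4096 - 24) / 8 = 509 table entries
 * before pte_free_submit() is forced.
 */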
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
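/*
 * Illustrative sketch, not a real call site: freed page tables flow
 * through the batch roughly like this, with pte_free_finish() (below)
 * submitting any partial batch when the mmu_gather is torn down:
 *
 *	tlb = tlb_gather_mmu(mm, 0);	 - disables preemption
 *	pgtable_free_tlb(tlb, pgf);	 - batch, or free immediately
 *	...
 *	pte_free_finish();		 - submit leftover batch via RCU
 *	tlb_finish_mmu(tlb, start, end); - re-enables preemption
 *
 * Deferring the frees through call_rcu() ensures that another CPU
 * walking these page tables locklessly has passed through a grace
 * period before the memory can be reused.
 */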
/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
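/*
 * Hedged sketch of a caller, for illustration only (the real call
 * sites live in the PTE update helpers, not here): the linux PTE is
 * modified first, under the PTE lock, then the stale hash entry is
 * queued using the *old* PTE value:
 *
 *	old = pte_val(*ptep);
 *	... clear/modify the linux PTE ...
 *	if (old & _PAGE_HASHPTE)
 *		hpte_need_flush(mm, addr, ptep, old, 0);
 *
 * Passing the old value matters: it is what the hash entry was built
 * from, so it carries the page size and _PAGE_HASHPTE information
 * needed to locate and invalidate that entry.
 */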
/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
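/*
 * For reference, a sketch of how a batch becomes "active" (the exact
 * hooks live in the lazy-MMU macros; this is assumed to match the
 * usage in __flush_hash_table_range() below):
 *
 *	arch_enter_lazy_mmu_mode();	 - marks the per-cpu batch
 *					   active, so hpte_need_flush()
 *					   accumulates instead of flushing
 *	... many PTE updates ...
 *	arch_leave_lazy_mmu_mode();	 - flushes anything pending via
 *					   __flush_tlb_pending() and
 *					   deactivates the batch
 */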
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */
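/*
 * Hypothetical usage, per the kernel-doc above (the window bounds are
 * illustrative names, not taken from the real hotplug code): after
 * unmapping a removed bridge's IO window, evict its stale hash entries
 * without disturbing the linux PTEs of neighbouring mappings that may
 * share the same 64K pages:
 *
 *	__flush_hash_table_range(&init_mm, io_window_start, io_window_end);
 */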