/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

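/*
 * Per-CPU batch of pending hash-table invalidations; entries are queued
 * by hpte_need_flush() below and drained by __flush_tlb_pending().
 */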
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment,
	 * e.g. for SPEs, we obtain the page size from the slice, which
	 * must still exist (i.e. the VMA must not have been reused) at
	 * the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * The batch context can change when we are in the middle of a
	 * TLB batch and we encounter memory pressure (e.g. copy_page_range
	 * when it tries to allocate a new pte). If we have to reclaim
	 * memory and end up scanning and resetting referenced bits then
	 * our batch context will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch.
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
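	/* Queue the entry; drain the batch as soon as it fills up */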
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
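	/* If this CPU is the only one that has ever touched this mm,
	 * the invalidation can be kept local to it; otherwise it must
	 * be broadcast to the other CPUs.
	 */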
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

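/*
 * Hook called by the generic mmu_gather code when a tear-down batch is
 * being finished; any pending hash flushes must be drained before the
 * underlying pages can be freed.
 */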
void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB entry.
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
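/*
 * A minimal usage sketch (hypothetical, not from this file): after
 * unmapping a removed bridge's IO window, a hotplug path might do
 *
 *	__flush_hash_table_range(&init_mm, io_start, io_start + io_size);
 *
 * where io_start and io_size are assumed names for the window bounds.
 */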
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance-oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
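		/* Only PTEs that were ever hashed have an HPTE to flush */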
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */