/* SMP TLB support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>

/*
 * Value of flush_va indicating that the whole TLB, not just a single page,
 * should be flushed
 */
#define FLUSH_ALL	0xffffffff

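/* Parameters of the current flush request: flush_tlb_others() fills these
 * in under tlbstate_lock and sets a bit in flush_cpumask for each target
 * CPU; each target performs the flush, clears its own bit, and the
 * requester spins until the mask is empty again.
 */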
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

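/* per-CPU MMU context tracking state; the initialiser attaches each CPU to
 * init_mm at boot
 */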
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	&init_mm, 0
};

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va);
static void do_flush_tlb_all(void *info);

/**
 * smp_flush_tlb - Callback to invalidate the TLB.
 * @unused: Callback context (ignored).
 */
void smp_flush_tlb(void *unused)
{
	unsigned long cpu_id;

	cpu_id = get_cpu();

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		/* This was a BUG() but until someone can quote me the line
		 * from the Intel manual that guarantees an IPI to multiple
		 * CPUs is retried _only_ on the erroring CPUs, it's staying
		 * as a return
		 *
		 * BUG();
		 */
		goto out;

	if (flush_va == FLUSH_ALL)
		local_flush_tlb();
	else
		local_flush_tlb_page(flush_mm, flush_va);

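	/* make sure the flush above completes before our bit disappears from
	 * flush_cpumask, as the requester takes an empty mask to mean the
	 * flush is finished everywhere
	 */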
	smp_mb__before_atomic();
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
	smp_mb__after_atomic();
out:
	put_cpu();
}

/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va != FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Temporarily this turns IRQs off, so that lockups are detected by the
	 * NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
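	/* publish the request by atomically ORing the target mask into
	 * flush_cpumask; the targets clear their own bits concurrently, so a
	 * plain non-atomic store could lose their updates
	 */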
#if NR_CPUS <= BITS_PER_LONG
	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS >= 3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

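	/* wait for every target to acknowledge by clearing its bit; the
	 * barrier merely forces flush_cpumask to be re-read on each pass
	 */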
	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

/**
 * flush_tlb_mm - Invalidate TLB of specified VM context
 * @mm: The VM context to invalidate.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

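	/* preemption is disabled so that we cannot migrate to another CPU
	 * between pruning ourselves from the target mask and doing the local
	 * flush
	 */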
	preempt_disable();
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	local_flush_tlb();
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

/**
 * flush_tlb_current_task - Invalidate TLB of current task
 */
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	local_flush_tlb();
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

/**
 * flush_tlb_page - Invalidate TLB of page
 * @vma: The VM context to invalidate the page for.
 * @va: The virtual address of the page to invalidate.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

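	/* invalidate just this page's entry locally; the other CPUs get the
	 * same single-page flush via flush_tlb_others()
	 */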
	local_flush_tlb_page(mm, va);
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

/**
 * do_flush_tlb_all - Callback to completely invalidate the local TLB
 * @unused: Callback context (ignored).
 */
static void do_flush_tlb_all(void *unused)
{
	local_flush_tlb_all();
}

/**
 * flush_tlb_all - Completely invalidate TLBs on all CPUs
 */
void flush_tlb_all(void)
{
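	/* run do_flush_tlb_all() on every online CPU, including this one, and
	 * wait for them all to complete (final argument is 1)
	 */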
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}