/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/* Serialises ASID allocation and generation rollover in __new_context(). */
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
/* Last context ID (generation bits | hardware ASID) handed out. */
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
/*
 * The mm active on each CPU, consulted by reset_context() during an
 * ASID rollover broadcast.  NOTE(review): presumably updated on
 * context switch outside this file — confirm against switch_mm().
 */
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif

/*
 * Write the CONTEXTIDR register (CP15 c13, c0, 1).  The isb()
 * ensures the new value takes effect before any subsequent
 * instructions execute.
 */
static void write_contextidr(u32 contextidr)
{
	asm("mcr	p15, 0, %0, c13, c0, 1" : : "r" (contextidr));
	isb();
}

#ifdef CONFIG_PID_IN_CONTEXTIDR
/* Read the current value of CONTEXTIDR (CP15 c13, c0, 1). */
static u32 read_contextidr(void)
{
	u32 contextidr;
	asm("mrc	p15, 0, %0, c13, c0, 1" : "=r" (contextidr));
	return contextidr;
}

/*
 * Thread switch notifier: mirror the incoming task's PID into the
 * upper bits of CONTEXTIDR (above the ASID field) so external
 * debug/trace hardware can identify the running process.  The low
 * ASID field (~ASID_MASK bits, as used by set_asid() below) is
 * preserved.
 */
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
	void *t)
{
	unsigned long flags;
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	/* Only context-switch notifications are of interest. */
	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task);
	/* IRQs off so the read-modify-write of CONTEXTIDR is not torn. */
	local_irq_save(flags);
	contextidr = read_contextidr();
	contextidr &= ~ASID_MASK;	/* keep the ASID field */
	contextidr |= pid << ASID_BITS;	/* insert the PID above it */
	write_contextidr(contextidr);
	local_irq_restore(flags);

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

/* Register the PID-tracking hook during boot. */
static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);

/*
 * Install a new hardware ASID while preserving the PID bits already
 * present in CONTEXTIDR.
 */
static void set_asid(unsigned int asid)
{
	u32 contextidr = read_contextidr();
	contextidr &= ASID_MASK;	/* keep the PID bits */
	contextidr |= asid & ~ASID_MASK;
	write_contextidr(contextidr);
}
#else
/* No PID tracking: the register carries only the ASID. */
static void set_asid(unsigned int asid)
{
	write_contextidr(asid);
}
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -070086/*
87 * We fork()ed a process, and we need a new context for the child
Russell King07989b72011-06-09 10:10:27 +010088 * to run in. We reserve version 0 for initial tasks so we will
89 * always allocate an ASID. The ASID 0 is reserved for the TTBR
90 * register changing sequence.
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 */
92void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
93{
94 mm->context.id = 0;
Thomas Gleixner450ea482009-07-03 08:44:46 -050095 raw_spin_lock_init(&mm->context.id_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096}
97
/*
 * Begin a new ASID generation on this CPU: install the reserved
 * ASID 0, then invalidate the local TLB (and, for VIVT ASID-tagged
 * I-caches, the I-cache as well).  The reserved ASID is written
 * first so no TLB entries can be fetched under a soon-to-be-reused
 * ASID.
 */
static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	set_asid(0);
	local_flush_tlb_all();
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

/*
 * Record the freshly allocated @asid in @mm — unless another CPU in
 * the rollover broadcast already stamped this mm with an ASID of the
 * current generation — then mark this CPU as a user of the mm.
 */
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;

	/* Pairs with the smp_wmb() in __new_context() before the IPI. */
	smp_rmb();
	/* Give each CPU its own distinct ASID in the new generation. */
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	set_asid(mm->context.id);
}

#else

/* UP: no cross-CPU races — store the ASID and own the cpumask. */
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

/*
 * Allocate a new ASID for @mm, whose context.id belongs to an old
 * generation.  On generation exhaustion this flushes the local TLB
 * and, on SMP, broadcasts reset_context() so every CPU moves to the
 * new generation together.
 */
void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	raw_spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		raw_spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */
	asid = ++cpu_last_asid;
	/* 32-bit wrap of the counter: restart at the first version. */
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		/* Skip reserved ASID 0; give each CPU its own slot. */
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		/* Publish the new generation before the cross-CPU reset. */
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	raw_spin_unlock(&cpu_asid_lock);
}