blob: 8bfae964b133987e061c430a0426f951b07172fa [file] [log] [blame]
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
Catalin Marinas11805bc2010-01-26 19:09:42 +010013#include <linux/smp.h>
14#include <linux/percpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015
16#include <asm/mmu_context.h>
17#include <asm/tlbflush.h>
18
Russell King8678c1f2007-05-08 20:03:09 +010019static DEFINE_SPINLOCK(cpu_asid_lock);
20unsigned int cpu_last_asid = ASID_FIRST_VERSION;
Catalin Marinas11805bc2010-01-26 19:09:42 +010021#ifdef CONFIG_SMP
22DEFINE_PER_CPU(struct mm_struct *, current_mm);
23#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
25/*
26 * We fork()ed a process, and we need a new context for the child
Will Deacon52af9c62011-05-26 11:23:43 +010027 * to run in.
Linus Torvalds1da177e2005-04-16 15:20:36 -070028 */
29void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
30{
31 mm->context.id = 0;
Catalin Marinas11805bc2010-01-26 19:09:42 +010032 spin_lock_init(&mm->context.id_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070033}
34
Catalin Marinas11805bc2010-01-26 19:09:42 +010035static void flush_context(void)
36{
Will Deacon52af9c62011-05-26 11:23:43 +010037 u32 ttb;
38 /* Copy TTBR1 into TTBR0 */
39 asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
40 "mcr p15, 0, %0, c2, c0, 0"
41 : "=r" (ttb));
Catalin Marinas11805bc2010-01-26 19:09:42 +010042 isb();
43 local_flush_tlb_all();
44 if (icache_is_vivt_asid_tagged()) {
45 __flush_icache_all();
46 dsb();
47 }
48}
49
50#ifdef CONFIG_SMP
51
52static void set_mm_context(struct mm_struct *mm, unsigned int asid)
53{
54 unsigned long flags;
55
56 /*
57 * Locking needed for multi-threaded applications where the
58 * same mm->context.id could be set from different CPUs during
59 * the broadcast. This function is also called via IPI so the
60 * mm->context.id_lock has to be IRQ-safe.
61 */
62 spin_lock_irqsave(&mm->context.id_lock, flags);
63 if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
64 /*
65 * Old version of ASID found. Set the new one and
66 * reset mm_cpumask(mm).
67 */
68 mm->context.id = asid;
69 cpumask_clear(mm_cpumask(mm));
70 }
71 spin_unlock_irqrestore(&mm->context.id_lock, flags);
72
73 /*
74 * Set the mm_cpumask(mm) bit for the current CPU.
75 */
76 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
77}
78
79/*
80 * Reset the ASID on the current CPU. This function call is broadcast
81 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
82 */
83static void reset_context(void *info)
84{
85 unsigned int asid;
86 unsigned int cpu = smp_processor_id();
87 struct mm_struct *mm = per_cpu(current_mm, cpu);
88
89 /*
90 * Check if a current_mm was set on this CPU as it might still
91 * be in the early booting stages and using the reserved ASID.
92 */
93 if (!mm)
94 return;
95
96 smp_rmb();
Will Deacon45b95232011-05-26 11:24:25 +010097 asid = cpu_last_asid + cpu;
Catalin Marinas11805bc2010-01-26 19:09:42 +010098
99 flush_context();
100 set_mm_context(mm, asid);
101
102 /* set the new ASID */
103 asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
104 isb();
105}
106
107#else
108
109static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
110{
111 mm->context.id = asid;
112 cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
113}
114
115#endif
116
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117void __new_context(struct mm_struct *mm)
118{
119 unsigned int asid;
120
Russell King8678c1f2007-05-08 20:03:09 +0100121 spin_lock(&cpu_asid_lock);
Catalin Marinas11805bc2010-01-26 19:09:42 +0100122#ifdef CONFIG_SMP
123 /*
124 * Check the ASID again, in case the change was broadcast from
125 * another CPU before we acquired the lock.
126 */
127 if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
128 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
129 spin_unlock(&cpu_asid_lock);
130 return;
131 }
132#endif
133 /*
134 * At this point, it is guaranteed that the current mm (with
135 * an old ASID) isn't active on any other CPU since the ASIDs
136 * are changed simultaneously via IPI.
137 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700138 asid = ++cpu_last_asid;
139 if (asid == 0)
Russell King8678c1f2007-05-08 20:03:09 +0100140 asid = cpu_last_asid = ASID_FIRST_VERSION;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141
142 /*
143 * If we've used up all our ASIDs, we need
144 * to start a new version and flush the TLB.
145 */
Russell King8678c1f2007-05-08 20:03:09 +0100146 if (unlikely((asid & ~ASID_MASK) == 0)) {
Will Deacon45b95232011-05-26 11:24:25 +0100147 asid = cpu_last_asid + smp_processor_id();
Catalin Marinas11805bc2010-01-26 19:09:42 +0100148 flush_context();
149#ifdef CONFIG_SMP
150 smp_wmb();
151 smp_call_function(reset_context, NULL, 1);
152#endif
Will Deacon45b95232011-05-26 11:24:25 +0100153 cpu_last_asid += NR_CPUS - 1;
Catalin Marinas9d99df42007-02-05 14:47:40 +0100154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700155
Catalin Marinas11805bc2010-01-26 19:09:42 +0100156 set_mm_context(mm, asid);
157 spin_unlock(&cpu_asid_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158}