/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

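/*
 * Per-CPU allocator state: active_asids holds the ASID currently installed
 * on each CPU (cleared to 0 across a rollover), reserved_asids preserves
 * the ASID of the task a CPU was running when a rollover happened, and
 * tlb_flush_pending marks CPUs that must flush their TLB before running
 * userspace with a freshly allocated ASID.
 */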
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

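/*
 * mm->context.id packs a monotonic generation number in the bits above
 * asid_bits and the hardware ASID in the low asid_bits bits. ASID_MASK
 * covers the generation part, ASID_FIRST_VERSION is the value added to
 * the generation on each rollover, and NUM_USER_ASIDS is the number of
 * hardware ASIDs available per generation.
 */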
#define ASID_MASK               (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION      (1UL << asid_bits)
#define NUM_USER_ASIDS          ASID_FIRST_VERSION

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
        u32 asid;
        int fld = cpuid_feature_extract_field(read_cpuid(SYS_ID_AA64MMFR0_EL1),
                                              ID_AA64MMFR0_ASID_SHIFT);

        switch (fld) {
        default:
                pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
                        smp_processor_id(), fld);
                /* Fallthrough */
        case 0:
                asid = 8;
                break;
        case 2:
                asid = 16;
        }

        return asid;
}

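/*
 * Called with cpu_asid_lock held, after the generation has been bumped on
 * rollover: rebuild asid_map from the ASIDs still live on each CPU so they
 * remain reserved, and queue a TLB flush on every CPU.
 */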
static void flush_context(unsigned int cpu)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

        /*
         * Ensure the generation bump is observed before we xchg the
         * active_asids.
         */
        smp_wmb();

        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
                __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /* Queue a TLB invalidate and flush the I-cache if necessary. */
        cpumask_setall(&tlb_flush_pending);

        if (icache_is_aivivt())
                __flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}

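/*
 * Allocate an ASID for @mm in the current generation, re-using its old
 * hardware ASID where possible. Called with cpu_asid_lock held.
 */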
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.
         * We always count from ASID #1, as we use ASID #0 when setting a
         * reserved TTBR0 for the init_mm.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid != NUM_USER_ASIDS)
                goto set_asid;

        /* We're out of ASIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
                                                 &asid_generation);
        flush_context(cpu);

        /* We have at least 1 ASID per CPU, so this will always succeed */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
        __set_bit(asid, asid_map);
        cur_idx = asid;
        return asid | generation;
}

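/*
 * Called on the context-switch path: make sure @mm holds an ASID from the
 * current generation (allocating a new one under cpu_asid_lock if not),
 * perform any pending local TLB flush and install the new page tables.
 */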
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long flags;
        u64 asid;

        asid = atomic64_read(&mm->context.id);

        /*
         * The memory ordering here is subtle. We rely on the control
         * dependency between the generation read and the update of
         * active_asids to ensure that we are synchronised with a
         * parallel rollover (i.e. this pairs with the smp_wmb() in
         * flush_context).
         */
        if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
            && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                local_flush_tlb_all();

        atomic64_set(&per_cpu(active_asids, cpu), asid);
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
        cpu_switch_mm(mm->pgd, mm);
}

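/*
 * Early-boot initialisation: size the ASID space from the boot CPU and
 * allocate the allocation bitmap before the first user mm is switched in.
 */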
static int asids_init(void)
{
        asid_bits = get_cpu_asid_bits();
        /* If we end up with more CPUs than ASIDs, expect things to crash */
        WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                           GFP_KERNEL);
        if (!asid_map)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);

        pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
        return 0;
}
early_initcall(asids_init);