ARM: 5905/1: ARM: Global ASID allocation on SMP
The current ASID allocation algorithm does not notify the other CPUs
when the ASID rolls over. This can lead to two processes using the
same ASID (but a different generation), or to multiple threads of the
same process using different ASIDs.
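For reference, mm->context.id packs a generation number in the bits
above the hardware ASID (ASID_BITS wide, 8 on ARMv6/v7, as defined in
mmu_context.h), and a context is stale once its generation no longer
matches the one in cpu_last_asid. A minimal sketch of that encoding
and check; asid_is_stale is an illustrative name, not a kernel
function:

    #define ASID_BITS           8
    #define ASID_MASK           ((~0) << ASID_BITS)  /* generation bits */
    #define ASID_FIRST_VERSION  (1 << ASID_BITS)

    /* Illustrative only: a context is stale when the generation part
     * of its id (the bits above ASID_BITS) differs from the current
     * generation held in cpu_last_asid. */
    static inline int asid_is_stale(unsigned int id, unsigned int last_asid)
    {
            return (id ^ last_asid) >> ASID_BITS;
    }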
This patch broadcasts the ASID rollover event to the other CPUs. To
avoid a race where multiple CPUs modify "cpu_last_asid" while handling
the broadcast, each CPU now numbers its new ASID from its own offset,
"smp_processor_id() + 1", and cpu_last_asid is advanced by NR_CPUS at
rollover.
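For illustration, the rollover path then looks roughly as follows.
This is a condensed sketch of the __new_context() change in
arch/arm/mm/context.c (not part of this excerpt); the actual code
also re-checks the ASID under cpu_asid_lock in case another CPU has
already broadcast a rollover, and handles the generation counter
wrapping:

    void __new_context(struct mm_struct *mm)
    {
            unsigned int asid;

            spin_lock(&cpu_asid_lock);
            asid = ++cpu_last_asid;
            if (unlikely((asid & ~ASID_MASK) == 0)) {
                    /* Rollover: this CPU takes the first of the new
                     * per-CPU ASIDs ... */
                    asid = cpu_last_asid + smp_processor_id() + 1;
                    flush_context();
                    /* ... asks every other CPU to pick its own ... */
                    smp_call_function(reset_context, NULL, 1);
                    /* ... and reserves one new ASID per CPU, so no
                     * CPU races on cpu_last_asid in the handler. */
                    cpu_last_asid += NR_CPUS;
            }
            set_mm_context(mm, asid);
            spin_unlock(&cpu_asid_lock);
    }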
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b561584..68870c7 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
unsigned int id;
+ spinlock_t id_lock;
#endif
unsigned int kvm_seq;
} mm_context_t;
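The new id_lock is initialized together with the context; a sketch of
the matching arch/arm/mm/context.c hunk, which this excerpt does not
show:

    void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
    {
            mm->context.id = 0;                     /* force a new ASID */
            spin_lock_init(&mm->context.id_lock);
    }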
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index de6cefb..a0b3cac 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@
#define ASID_FIRST_VERSION (1 << ASID_BITS)

extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

static inline void check_context(struct mm_struct *mm)
{
+ /*
+ * This code is executed with interrupts enabled. Therefore,
+ * mm->context.id cannot be updated to the latest ASID version
+ * on a different CPU (and condition below not triggered)
+ * without first getting an IPI to reset the context. The
+ * alternative is to take a read_lock on mm->context.id_lock
+ * (after changing its type to rwlock_t).
+ */
if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
__new_context(mm);
@@ -108,6 +119,10 @@
__flush_icache_all();
#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+ struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+ *crt_mm = next;
+#endif
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
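The per-CPU current_mm recorded above is what the receiving side of
the broadcast consumes. Roughly, and with the CONTEXTIDR write and the
TLB/cache flushing abbreviated, the IPI handler and the locked context
update look like this (again a sketch of the arch/arm/mm/context.c
side, not shown in this excerpt):

    /* Serialize context.id updates: several CPUs may run threads of
     * the same mm and handle the same broadcast concurrently. */
    static void set_mm_context(struct mm_struct *mm, unsigned int asid)
    {
            unsigned long flags;

            spin_lock_irqsave(&mm->context.id_lock, flags);
            if ((mm->context.id ^ cpu_last_asid) >> ASID_BITS)
                    mm->context.id = asid;  /* first updater wins */
            spin_unlock_irqrestore(&mm->context.id_lock, flags);
    }

    /* Runs on every other CPU via smp_call_function(). */
    static void reset_context(void *info)
    {
            unsigned int cpu = smp_processor_id();
            struct mm_struct *mm = per_cpu(current_mm, cpu);

            if (!mm)        /* early boot: still on the reserved ASID */
                    return;

            /* Each CPU derives a distinct ASID from its own offset. */
            set_mm_context(mm, cpu_last_asid + cpu + 1);
            cpu_switch_mm(mm->pgd, mm);     /* switch to the new ASID */
    }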