cpumask: use mm_cpumask() wrapper: cris

Makes the code future-proof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops, which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
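
For reference, here is a minimal sketch of the old idiom next to the new one.
It is illustrative only and not part of this patch; the helper name is made up,
but the calls are the ones the patch switches between:

	#include <linux/mm.h>		/* struct mm_struct, mm_cpumask() */
	#include <linux/cpumask.h>	/* cpumask_clear(), cpumask_set_cpu() */

	/* Hypothetical helper: mark @cpu as the only CPU that still has
	 * live mappings for @mm, much as the smp.c hunk below does after
	 * a full flush. */
	static inline void example_reset_mm_cpus(struct mm_struct *mm,
						 unsigned int cpu)
	{
		/* Old, deprecated style: poke mm->cpu_vm_mask directly:
		 *	cpus_clear(mm->cpu_vm_mask);
		 *	cpu_set(cpu, mm->cpu_vm_mask);
		 */

		/* New style: mm_cpumask() hides the field and the
		 * cpumask_* ops take a struct cpumask pointer. */
		cpumask_clear(mm_cpumask(mm));
		cpumask_set_cpu(cpu, mm_cpumask(mm));
	}

Going through mm_cpumask() means this code keeps working however the
underlying mask ends up being stored in struct mm_struct.
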
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index b47764c..dc31b04 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -232,7 +232,7 @@
 	cpumask_t cpu_mask;
 
 	spin_lock_irqsave(&tlbstate_lock, flags);
-	cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
+	cpu_mask = (mm == FLUSH_ALL ? *cpu_all_mask : *mm_cpumask(mm));
 	cpu_clear(smp_processor_id(), cpu_mask);
 	flush_mm = mm;
 	flush_vma = vma;
@@ -252,8 +252,8 @@
 	__flush_tlb_mm(mm);
 	flush_tlb_common(mm, FLUSH_ALL, 0);
 	/* No more mappings in other CPUs */
-	cpus_clear(mm->cpu_vm_mask);
-	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 }
 
 void flush_tlb_page(struct vm_area_struct *vma,
diff --git a/arch/cris/arch-v32/mm/tlb.c b/arch/cris/arch-v32/mm/tlb.c
index 55ade36..6779bcb 100644
--- a/arch/cris/arch-v32/mm/tlb.c
+++ b/arch/cris/arch-v32/mm/tlb.c
@@ -185,7 +185,7 @@
 	/* Make sure there is a MMU context. */
 	spin_lock(&mmu_context_lock);
 	get_mmu_context(next);
-	cpu_set(cpu, next->cpu_vm_mask);
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 	spin_unlock(&mmu_context_lock);
 
 	/*