/*
 * Common implementation of switch_mm_irqs_off()
 *
 * Copyright IBM Corp. 2017
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
}
#else
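/*
 * The remaining platforms (64-bit Book3S) are believed to track the
 * current context through the PACA in switch_mmu_context() instead
 * (see the copy_mm_to_paca() reference below), so there is nothing
 * extra to do here.
 */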
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif

#ifdef CONFIG_PPC_BOOK3S_64
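/*
 * active_cpus is assumed here to be the Book3S 64 count of CPUs that
 * may hold translations for this mm, consulted elsewhere (e.g. when
 * deciding between local and global TLB invalidations). This helper
 * only bumps it the first time the mm runs on a given CPU.
 */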
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
#endif

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin
		 * loading translations for next.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is the first load from slb_cache in
		 * switch_slb() and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the read side the barrier is in pte_xchg(), which orders
		 * the store to the PTE vs the load of mm_cpumask.
		 */
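		/*
		 * Rough sketch of the pairing this relies on (illustration
		 * only; the CPU labels are hypothetical):
		 *
		 *   CPU A: switching in 'next'     CPU B: updating a PTE
		 *   --------------------------     ---------------------
		 *   store to mm_cpumask(next)      store new PTE (pte_xchg)
		 *   smp_mb()                       smp_mb() in pte_xchg()
		 *   load context id / slb_cache    load mm_cpumask
		 *
		 * Either CPU B sees CPU A in the cpumask and includes it in
		 * its flush, or CPU A's translation loads see the new PTE.
		 */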
		smp_mb();

		new_on_cpu = true;
	}

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all AltiVec data streams before changing the
	 * hardware context ("dssall" is Data Stream Stop All).
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");

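	/*
	 * Assumed context for the call below: on radix under KVM,
	 * radix_kvm_prefetch_workaround() is understood to guard against
	 * stale translations prefetched while this CPU ran a guest; it is
	 * only needed the first time 'next' runs on this CPU.
	 */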
	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);

	/*
	 * The actual HW switching method differs between the various
	 * subarchitectures. Out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}
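
/*
 * For reference, a minimal sketch of how a caller running with
 * interrupts enabled is expected to wrap this function (the actual
 * wrapper lives in asm/mmu_context.h; this rendition is illustrative,
 * not a copy):
 *
 *	static inline void switch_mm(struct mm_struct *prev,
 *				     struct mm_struct *next,
 *				     struct task_struct *tsk)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		switch_mm_irqs_off(prev, next, tsk);
 *		local_irq_restore(flags);
 *	}
 */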