Hollis Blanchard | 75f74f0 | 2008-11-05 09:36:16 -0600 | [diff] [blame] | 1 | /* |
| 2 | * This program is free software; you can redistribute it and/or modify |
| 3 | * it under the terms of the GNU General Public License, version 2, as |
| 4 | * published by the Free Software Foundation. |
| 5 | * |
| 6 | * This program is distributed in the hope that it will be useful, |
| 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 9 | * GNU General Public License for more details. |
| 10 | * |
| 11 | * You should have received a copy of the GNU General Public License |
| 12 | * along with this program; if not, write to the Free Software |
| 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
| 14 | * |
| 15 | * Copyright IBM Corp. 2008 |
| 16 | * |
| 17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
| 18 | */ |
| 19 | |
| 20 | #ifndef __KVM_BOOKE_H__ |
| 21 | #define __KVM_BOOKE_H__ |
| 22 | |
| 23 | #include <linux/types.h> |
| 24 | #include <linux/kvm_host.h> |
Hollis Blanchard | d0c7dc0 | 2009-01-03 16:23:06 -0600 | [diff] [blame] | 25 | #include <asm/kvm_ppc.h> |
Scott Wood | 8fae845 | 2011-12-20 15:34:45 +0000 | [diff] [blame] | 26 | #include <asm/switch_to.h> |
Hollis Blanchard | 73e75b4 | 2008-12-02 15:51:57 -0600 | [diff] [blame] | 27 | #include "timing.h" |
Hollis Blanchard | 75f74f0 | 2008-11-05 09:36:16 -0600 | [diff] [blame] | 28 | |
/* interrupt priority ordering */
#define BOOKE_IRQPRIO_DATA_STORAGE 0
#define BOOKE_IRQPRIO_INST_STORAGE 1
#define BOOKE_IRQPRIO_ALIGNMENT 2
#define BOOKE_IRQPRIO_PROGRAM 3
#define BOOKE_IRQPRIO_FP_UNAVAIL 4
#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
#define BOOKE_IRQPRIO_SPE_FP_DATA 6
#define BOOKE_IRQPRIO_SPE_FP_ROUND 7
#define BOOKE_IRQPRIO_SYSCALL 8
#define BOOKE_IRQPRIO_AP_UNAVAIL 9
#define BOOKE_IRQPRIO_DTLB_MISS 10
#define BOOKE_IRQPRIO_ITLB_MISS 11
#define BOOKE_IRQPRIO_MACHINE_CHECK 12
#define BOOKE_IRQPRIO_DEBUG 13
#define BOOKE_IRQPRIO_CRITICAL 14
#define BOOKE_IRQPRIO_WATCHDOG 15
#define BOOKE_IRQPRIO_EXTERNAL 16
#define BOOKE_IRQPRIO_FIT 17
#define BOOKE_IRQPRIO_DECREMENTER 18
#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
/* Internal pseudo-irqprio for level triggered externals */
#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
#define BOOKE_IRQPRIO_DBELL 21
#define BOOKE_IRQPRIO_DBELL_CRIT 22
/* One past the highest priority number; sizes the pending-irq bitmask. */
#define BOOKE_IRQPRIO_MAX 23

/*
 * Bitmask (1 << BOOKE_IRQPRIO_*) of the priorities whose delivery is
 * gated by MSR[EE] — presumably the "external enable" class; confirm
 * against the delivery logic in booke.c.
 */
#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
			  (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
			  (1 << BOOKE_IRQPRIO_DBELL) | \
			  (1 << BOOKE_IRQPRIO_DECREMENTER) | \
			  (1 << BOOKE_IRQPRIO_FIT) | \
			  (1 << BOOKE_IRQPRIO_EXTERNAL))

/* Same idea for the priorities gated by MSR[CE] (critical enable). */
#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
			  (1 << BOOKE_IRQPRIO_WATCHDOG) | \
			  (1 << BOOKE_IRQPRIO_CRITICAL))
Hollis Blanchard | bb3a8a1 | 2009-01-03 16:23:13 -0600 | [diff] [blame] | 66 | |
/* Shared booke exception-handler trampolines (defined in booke asm/code). */
extern unsigned long kvmppc_booke_handlers;
extern unsigned long kvmppc_booke_handler_addr[];

/* Guest MSR update; kvmppc_mmu_msr_notify lets the MMU react to a change. */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);

/* Guest timer/exception-control register updates (EPCR, TCR, TSR bits). */
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr);
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);

/*
 * Booke-common instruction/SPR emulation.  *advance tells the caller
 * whether to step the guest PC past the emulated instruction.
 */
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);

/* low-level asm code to transfer guest state */
void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);

/* high-level function, manages flags, host state */
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);

/* Booke-common vcpu load/put hooks called on physical-CPU migration. */
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
| 92 | |
/*
 * Delivery class of a pending interrupt.  Book E distinguishes these
 * exception levels; each presumably maps to its own save/restore
 * register pair — confirm against the delivery code in booke.c.
 */
enum int_class {
	INT_CLASS_NONCRIT,	/* non-critical */
	INT_CLASS_CRIT,		/* critical */
	INT_CLASS_MC,		/* machine check */
	INT_CLASS_DBG,		/* debug */
};

/* Queue an interrupt of the given class for delivery to the guest. */
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
| 101 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame^] | 102 | extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu); |
| 103 | extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 104 | unsigned int inst, int *advance); |
| 105 | extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, |
| 106 | ulong spr_val); |
| 107 | extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, |
| 108 | ulong *spr_val); |
| 109 | extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); |
| 110 | extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, |
| 111 | struct kvm_vcpu *vcpu, |
| 112 | unsigned int inst, int *advance); |
| 113 | extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, |
| 114 | ulong spr_val); |
| 115 | extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, |
| 116 | ulong *spr_val); |
| 117 | extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); |
| 118 | extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, |
| 119 | struct kvm_vcpu *vcpu, |
| 120 | unsigned int inst, int *advance); |
| 121 | extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, |
| 122 | ulong spr_val); |
| 123 | extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, |
| 124 | ulong *spr_val); |
| 125 | |
/*
 * Make the FPU hold the guest vcpu's FP state, if it does not already.
 *
 * Loads the guest FP registers and sets MSR_FP in the current thread's
 * saved MSR, so the host knows the unit is live and will save the guest
 * state on our behalf if another thread needs the FPU.  This mirrors
 * what a real FP-unavailable fault would do.
 *
 * Caller must have preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!vcpu->fpu_active)
		return;
	if (current->thread.regs->msr & MSR_FP)
		return;		/* guest state already loaded */

	load_up_fpu();
	current->thread.regs->msr |= MSR_FP;
#endif
}
| 144 | |
/*
 * Flush the guest vcpu's FP state from the FPU back into the thread
 * struct, if it is currently live there.
 *
 * Caller must have preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!vcpu->fpu_active)
		return;
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
#endif
}
Bharat Bhushan | ce11e48 | 2013-07-04 12:27:47 +0530 | [diff] [blame] | 156 | |
| 157 | static inline void kvmppc_clear_dbsr(void) |
| 158 | { |
| 159 | mtspr(SPRN_DBSR, mfspr(SPRN_DBSR)); |
| 160 | } |
Hollis Blanchard | 75f74f0 | 2008-11-05 09:36:16 -0600 | [diff] [blame] | 161 | #endif /* __KVM_BOOKE_H__ */ |