/*
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_book3s_64.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
/*
 * This handles the cases where the guest is in real suspend mode
 * and we want to get back to the guest without dooming the transaction.
 * The caller has checked that the guest is in real-suspend mode
 * (MSR[TS] = S and the fake-suspend flag is not set).
 * Returns 1 if the instruction was emulated here and we can return to
 * the guest, or 0 to leave it to the normal emulation path.
 */
int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
{
	u32 instr = vcpu->arch.emul_inst;
	u64 newmsr, msr, bescr;
	int rs;

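	/*
	 * The mask keeps the primary opcode (top 6 bits) and the extended
	 * opcode/Rc field (bottom 11 bits); per-instruction fields such as
	 * RS, L and the rfebb GE bit are decoded in the individual cases.
	 */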
	switch (instr & 0xfc0007ff) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;
		/* should only get here for an Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
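		/*
		 * As with a real rfid, CFAR gets the address of the rfid
		 * itself; NIP has already been advanced past it.
		 */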
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
		return 1;

	case PPC_INST_RFEBB:
		/* check for PR=1 and arch 2.06 bit set in PCR */
		msr = vcpu->arch.shregs.msr;
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
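		/*
		 * At this early point in the exit path the guest's FSCR,
		 * BESCR and EBBRR are still live in the hardware SPRs, so
		 * they are accessed directly with mfspr/mtspr.
		 */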
		/* check EBB facility is available */
		if (!(vcpu->arch.hfscr & HFSCR_EBB) ||
		    ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
			return 0;
		bescr = mfspr(SPRN_BESCR);
		/* expect to see an S->T transition requested */
		if (((bescr >> 30) & 3) != 2)
			return 0;
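		/* copy the GE bit of the instruction into BESCR[GE] */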
		bescr &= ~BESCR_GE;
		if (instr & (1 << 11))
			bescr |= BESCR_GE;
		mtspr(SPRN_BESCR, bescr);
		msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		vcpu->arch.shregs.msr = msr;
		vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
		vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
		return 1;

	case PPC_INST_MTMSRD:
		/* XXX do we need to check for PR=0 here? */
		rs = (instr >> 21) & 0x1f;
		newmsr = kvmppc_get_gpr(vcpu, rs);
		msr = vcpu->arch.shregs.msr;
		/* check this is an Sx -> T1 transition */
		if (!(MSR_TM_TRANSACTIONAL(newmsr) && (newmsr & MSR_TM)))
			return 0;
		/* mtmsrd doesn't change LE */
		newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
		newmsr = sanitize_msr(newmsr);
		vcpu->arch.shregs.msr = newmsr;
		return 1;

	case PPC_INST_TSR:
		/* we know the MSR has the TS field = S (0b01) here */
		msr = vcpu->arch.shregs.msr;
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
			return 0;
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM) || !(msr & MSR_TM))
			return 0;
		/* L=1 => tresume => set TS to T (0b10) */
		if (instr & (1 << 21))
			vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
		/* Set CR0 to 0b0010, i.e. the previous transaction state (S) */
		vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
		return 1;
	}

	return 0;
}

/*
 * This is called when we are returning to a guest in TM transactional
 * state.  We roll the guest state back to the checkpointed state.
 */
void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr &= ~MSR_TS_MASK;	/* go to N state */
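	/* resume at the failure handler address recorded in TFHAR */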
	vcpu->arch.regs.nip = vcpu->arch.tfhar;
	copy_from_checkpoint(vcpu);
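	/* set CR0 to 0b1010, the failure indication the handler expects */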
	vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
}