/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/kvm_host.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>

#include "44x_tlb.h"

/* Note: clearing MSR[DE] just means that the debug interrupt will not be
 * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
 * will be delivered as an "imprecise debug event" (which is indicated by
 * DBSR[IDE]).
 */
static void kvm44x_disable_debug_interrupts(void)
{
	mtmsr(mfmsr() & ~MSR_DE);
}

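/* Restore the host's debug registers, saved earlier by
 * kvmppc_core_load_guest_debugstate(). Debug interrupts remain masked until
 * the final mtmsr() restores the host's original MSR, including MSR[DE]. */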
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
	kvm44x_disable_debug_interrupts();

	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
	mtmsr(vcpu->arch.host_msr);
}

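/* Save the host's debug register state and install the guest's instruction
 * breakpoints from vcpu->guest_debug. Debug interrupts are masked first so
 * that no debug event can be delivered while the registers are in an
 * inconsistent state. */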
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
	u32 dbcr0 = 0;

	vcpu->arch.host_msr = mfmsr();
	kvm44x_disable_debug_interrupts();

	/* Save host debug register state. */
	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);

	/* Set up the registers for the guest: each armed instruction
	 * breakpoint is loaded into its IACn register and enabled in DBCR0,
	 * along with DBCR0_IDM (internal debug mode). */

	if (dbg->bp[0]) {
		mtspr(SPRN_IAC1, dbg->bp[0]);
		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
	}
	if (dbg->bp[1]) {
		mtspr(SPRN_IAC2, dbg->bp[1]);
		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
	}
	if (dbg->bp[2]) {
		mtspr(SPRN_IAC3, dbg->bp[2]);
		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
	}
	if (dbg->bp[3]) {
		mtspr(SPRN_IAC4, dbg->bp[3]);
		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
	}

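	/* Commit the new configuration. Zeroing DBCR1/DBCR2 selects the
	 * default IAC behavior (exact effective-address match, in both user
	 * and supervisor mode); MSR[DE] is still clear, so the write order
	 * is not critical here. */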
	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBCR1, 0);
	mtspr(SPRN_DBCR2, 0);
}

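/* Called when this vcpu is scheduled onto a host physical CPU. */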
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int i;

	/* Mark every guest entry in the shadow TLB as modified, so that they
	 * will all be reloaded on the next vcpu run (instead of being
	 * demand-faulted). */
	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_tlbe_set_modified(vcpu, i);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* Don't leave guest TLB entries resident when being de-scheduled. */
	/* XXX It would be nice to differentiate between heavyweight exit and
	 * sched_out here, since we could avoid the TLB flush for heavyweight
	 * exits. */
	_tlbia();
}

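/* KVM on 44x requires an exact "ppc440" platform match; anything else is
 * reported as unsupported. */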
int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}