/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_44x.h>
#include <asm/kvm_ppc.h>

#include "44x_tlb.h"

/* Note: clearing MSR[DE] just means that the debug interrupt will not be
 * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
 * will be delivered as an "imprecise debug event" (which is indicated by
 * DBSR[IDE]).
 */
static void kvm44x_disable_debug_interrupts(void)
{
	mtmsr(mfmsr() & ~MSR_DE);
}

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
	kvm44x_disable_debug_interrupts();

	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
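	/* Note the order below: DBCR0 holds the event enable bits, so it is
	 * written only after the other debug registers again hold host
	 * values, and MSR (with MSR[DE]) is restored last of all. */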
	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
	mtmsr(vcpu->arch.host_msr);
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
	u32 dbcr0 = 0;

	vcpu->arch.host_msr = mfmsr();
	kvm44x_disable_debug_interrupts();

	/* Save host debug register state. */
	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);

	/* Set up the debug registers for the guest's requested breakpoints. */
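	/* DBCR0_IACn enables instruction address compare n, and DBCR0_IDM
	 * selects internal debug mode, in which debug events raise debug
	 * interrupts instead of halting for an external debugger. */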
	if (dbg->bp[0]) {
		mtspr(SPRN_IAC1, dbg->bp[0]);
		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
	}
	if (dbg->bp[1]) {
		mtspr(SPRN_IAC2, dbg->bp[1]);
		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
	}
	if (dbg->bp[2]) {
		mtspr(SPRN_IAC3, dbg->bp[2]);
		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
	}
	if (dbg->bp[3]) {
		mtspr(SPRN_IAC4, dbg->bp[3]);
		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
	}

	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBCR1, 0);
	mtspr(SPRN_DBCR2, 0);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int i;

	/* Mark every guest entry in the shadow TLB as modified, so that they
	 * will all be reloaded on the next vcpu run (instead of being
	 * demand-faulted). */
	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_tlbe_set_modified(vcpu, i);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	/* Don't leave guest TLB entries resident when being de-scheduled. */
	/* XXX It would be nice to differentiate between heavyweight exit and
	 * sched_out here, since we could avoid the TLB flush for heavyweight
	 * exits. */
	_tlbia();
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];

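	/* Entry 0: map the low 16MB of guest effective address space 1:1
	 * (EPN and RPN are both zero), with supervisor read/write/execute
	 * permissions. */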
	tlbe->tid = 0;
	tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
	tlbe->word1 = 0;
	tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;

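	/* Entry 1: map the 4K page at 0xef600000 1:1, cache-inhibited and
	 * guarded as befits I/O. (On common 440 boards this is the on-chip
	 * peripheral region containing the UART, but which device lives
	 * there is board-specific.) */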
	tlbe++;
	tlbe->tid = 0;
	tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
	tlbe->word1 = 0xef600000;
	tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
	              | PPC44x_TLB_I | PPC44x_TLB_G;

	/* Since the guest can directly access the timebase, it must know the
	 * real timebase frequency. Accordingly, it must see the state of
	 * CCR1[TCS]. */
	vcpu->arch.ccr1 = mfspr(SPRN_CCR1);

	return 0;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
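/* The layout, as decoded below:
 *   bit  40      AS (address space)
 *   bits 39..32  PID (process ID)
 *   bits 31..0   EADDR (effective address)
 */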
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe;
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
	if (index == -1) {
		tr->valid = 0;
		return 0;
	}

	gtlbe = &vcpu_44x->guest_tlb[index];

	tr->physical_address = tlb_xlate(gtlbe, eaddr);
	/* XXX what do "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_44x *vcpu_44x;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_44x) {
		err = -ENOMEM;
		goto out;
	}

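	/* The generic vcpu is embedded in the 440-specific container: we
	 * initialize the embedded struct and hand back a pointer to it;
	 * callers recover the container with to_44x(). */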
	vcpu = &vcpu_44x->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}

static int kvmppc_44x_init(void)
{
	int r;

	r = kvmppc_booke_init();
	if (r)
		return r;

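	/* Passing sizeof(struct kvmppc_vcpu_44x) makes the generic KVM layer
	 * size its vcpu cache for the full 440 container, which is what
	 * kvmppc_core_vcpu_create() above allocates from. */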
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
}

static void kvmppc_44x_exit(void)
{
	kvmppc_booke_exit();
}

module_init(kvmppc_44x_init);
module_exit(kvmppc_44x_exit);