blob: f48a0c22e8f9024e87965df7b0821596d43c926a [file] [log] [blame]
Scott Wood73196cd2011-12-20 15:34:47 +00001/*
Mihai Caramanc7ba7772012-06-25 02:26:19 +00002 * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
Scott Wood73196cd2011-12-20 15:34:47 +00003 *
4 * Author: Varun Sethi, <varun.sethi@freescale.com>
5 *
6 * Description:
7 * This file is derived from arch/powerpc/kvm/e500.c,
8 * by Yu Liu <yu.liu@freescale.com>.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kvm_host.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/export.h>
Alexander Graf398a76c2013-12-09 13:53:42 +010019#include <linux/miscdevice.h>
20#include <linux/module.h>
Scott Wood73196cd2011-12-20 15:34:47 +000021
22#include <asm/reg.h>
23#include <asm/cputable.h>
24#include <asm/tlbflush.h>
25#include <asm/kvm_ppc.h>
26#include <asm/dbell.h>
27
28#include "booke.h"
29#include "e500.h"
30
31void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
32{
33 enum ppc_dbell dbell_type;
34 unsigned long tag;
35
36 switch (type) {
37 case INT_CLASS_NONCRIT:
38 dbell_type = PPC_G_DBELL;
39 break;
40 case INT_CLASS_CRIT:
41 dbell_type = PPC_G_DBELL_CRIT;
42 break;
43 case INT_CLASS_MC:
44 dbell_type = PPC_G_DBELL_MC;
45 break;
46 default:
47 WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
48 return;
49 }
50
Mihai Caraman188e2672014-09-01 12:01:58 +030051 preempt_disable();
52 tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
Scott Wood73196cd2011-12-20 15:34:47 +000053 mb();
54 ppc_msgsnd(dbell_type, 0, tag);
Mihai Caraman188e2672014-09-01 12:01:58 +030055 preempt_enable();
Scott Wood73196cd2011-12-20 15:34:47 +000056}
57
/* gtlbe must not be mapped by more than one host tlb entry */
/*
 * Invalidate the single host (shadow) TLB entry backing the given guest
 * TLB entry.  Searches the host TLB with tlbsx using the guest entry's
 * TID/TS/EA, and if a valid match is found, clears its valid bit with
 * tlbwe.  Runs with interrupts off because the MAS registers are shared,
 * per-CPU state.
 */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned int tid, ts;
	gva_t eaddr;
	u32 val;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	/* We search the host TLB to invalidate its shadow TLB entry */
	val = (tid << 16) | ts;
	eaddr = get_tlb_eaddr(gtlbe);

	local_irq_save(flags);

	/* MAS6 supplies the PID/TS search criteria for tlbsx. */
	mtspr(SPRN_MAS6, val);
	/* Search in guest space for this vcpu's LPID. */
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));

	asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
	val = mfspr(SPRN_MAS1);
	if (val & MAS1_VALID) {
		/* Found it: write the entry back with MAS1[V] cleared. */
		mtspr(SPRN_MAS1, val & ~MAS1_VALID);
		asm volatile("tlbwe");
	}
	/* Restore MAS5 so later host tlbwe ops target the host space. */
	mtspr(SPRN_MAS5, 0);
	/* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
	mtspr(SPRN_MAS8, 0);
	isync();

	local_irq_restore(flags);
}
92
/*
 * Invalidate all host (shadow) TLB entries belonging to this vcpu's LPID
 * on the current CPU, using the tlbilxlpid instruction.  MAS5 selects the
 * guest space/LPID for the invalidate and is restored to 0 afterwards;
 * interrupts are disabled around the shared MAS register usage.
 */
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
	asm volatile("tlbilxlpid");
	mtspr(SPRN_MAS5, 0);
	local_irq_restore(flags);
}
103
/* Record the guest's current PID; no immediate TLB action is needed here. */
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	vcpu->arch.pid = pid;
}
108
/*
 * Intentionally empty: on e500mc the hardware keeps guest translation
 * state consistent across MSR changes, so no MMU fixup is required.
 */
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
112
/*
 * We use two lpids per VM (see kvmppc_core_init_vm_e500mc).
 *
 * Per physical CPU, the last vcpu whose shadow TLB state was active for
 * each LPID.  Used in vcpu_load to decide whether a full LPID TLB
 * invalidation is required before running a vcpu on this CPU.
 */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
Scott Woodc5e6cb02013-02-18 18:13:09 +0000115
/*
 * Load this vcpu's hypervisor-visible state into the current CPU's
 * registers before entering the guest: LPID, embedded-hypervisor control
 * registers, guest SPR shadows (GIVPR/GIVORs/GSPRGs/GSRRs/GEPR/GDEAR/GESR),
 * and the external-PID load/store context.  Finally, flush the shadow TLB
 * if this vcpu's state may be stale on this CPU.
 */
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_booke_vcpu_load(vcpu, cpu);

	mtspr(SPRN_LPID, get_lpid(vcpu));
	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
	mtspr(SPRN_GPIR, vcpu->vcpu_id);
	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
	/* External-PID context: guest space, this vcpu's LPID. */
	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;
	mtspr(SPRN_EPLC, vcpu->arch.eplc);
	mtspr(SPRN_EPSC, vcpu->arch.epsc);

	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

	mtspr(SPRN_GEPR, vcpu->arch.epr);
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	/*
	 * The shadow TLB must be invalidated if the vcpu last ran on a
	 * different physical CPU, or if another vcpu has since used this
	 * LPID on this CPU.
	 */
	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
	}
}
152
/*
 * Save the guest-visible SPR shadows back into the vcpu on context
 * switch out, record the physical CPU we ran on (oldpir, consumed by
 * vcpu_load to detect CPU migration), then hand off to the common
 * booke put path.
 */
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.eplc = mfspr(SPRN_EPLC);
	vcpu->arch.epsc = mfspr(SPRN_EPSC);

	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

	vcpu->arch.epr = mfspr(SPRN_GEPR);
	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
	vcpu->arch.shared->esr = mfspr(SPRN_GESR);

	vcpu->arch.oldpir = mfspr(SPRN_PIR);

	kvmppc_booke_vcpu_put(vcpu);
}
174
175int kvmppc_core_check_processor_compat(void)
176{
177 int r;
178
179 if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
180 r = 0;
181 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
182 r = 0;
Mihai Caramand2ca32a2014-09-01 12:01:59 +0300183#ifdef CONFIG_ALTIVEC
184 /*
Adam Buchbinder446957b2016-02-24 10:51:11 -0800185 * Since guests have the privilege to enable AltiVec, we need AltiVec
Mihai Caramand2ca32a2014-09-01 12:01:59 +0300186 * support in the host to save/restore their context.
187 * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
188 * because it's cleared in the absence of CONFIG_ALTIVEC!
189 */
190 else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
191 r = 0;
192#endif
Scott Wood73196cd2011-12-20 15:34:47 +0000193 else
194 r = -ENOTSUPP;
195
196 return r;
197}
198
/*
 * One-time vcpu hardware-facing setup: configure the shadow EPCR (debug
 * interrupts, TLB-management trapping, user-cache-op disable; 64-bit
 * guest mode on 64-bit hosts), the shadow MSR protections, and snapshot
 * the host PVR/SVR as the values exposed to the guest.  Always returns 0.
 */
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
				 SPRN_EPCR_DUVD;
#ifdef CONFIG_64BIT
	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;

	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500MC;

	return 0;
}
217
/*
 * Fill the user-visible special-register state (KVM_GET_SREGS):
 * advertise the supported feature set, export the FSL implementation
 * registers (SVR/HID0/MCAR), the TLB configuration, and the high IVORs
 * for performance monitor and doorbell interrupts, then delegate the
 * common IVORs to the shared helper.
 */
static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
			       KVM_SREGS_E_PC;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	kvmppc_get_sregs_e500_tlb(vcpu, sregs);

	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

	return kvmppc_get_sregs_ivor(vcpu, sregs);
}
241
/*
 * Apply user-supplied special-register state (KVM_SET_SREGS).  FSL
 * implementation registers and TLB state are taken first; the IVOR
 * updates are applied only for the feature groups the caller flagged
 * as valid.  Returns 0 on success or a negative error from the TLB or
 * IVOR helpers.
 */
static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	/* No IVOR payload supplied; nothing further to apply. */
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PC) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
			sregs->u.e.ivor_high[4];
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
			sregs->u.e.ivor_high[5];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}
275
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +0530276static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
277 union kvmppc_one_reg *val)
Mihai Caraman35b299e2013-04-11 00:03:07 +0000278{
Bharat Bhushan28d2f422014-07-25 11:21:08 +0530279 int r = 0;
280
281 switch (id) {
282 case KVM_REG_PPC_SPRG9:
283 *val = get_reg_val(id, vcpu->arch.sprg9);
284 break;
285 default:
286 r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
287 }
288
Mihai Caramana85d2aa2013-04-11 00:03:08 +0000289 return r;
Mihai Caraman35b299e2013-04-11 00:03:07 +0000290}
291
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +0530292static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
293 union kvmppc_one_reg *val)
Mihai Caraman35b299e2013-04-11 00:03:07 +0000294{
Bharat Bhushan28d2f422014-07-25 11:21:08 +0530295 int r = 0;
296
297 switch (id) {
298 case KVM_REG_PPC_SPRG9:
299 vcpu->arch.sprg9 = set_reg_val(id, *val);
300 break;
301 default:
302 r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
303 }
304
Mihai Caramana85d2aa2013-04-11 00:03:08 +0000305 return r;
Mihai Caraman35b299e2013-04-11 00:03:07 +0000306}
307
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +0530308static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
309 unsigned int id)
Scott Wood73196cd2011-12-20 15:34:47 +0000310{
311 struct kvmppc_vcpu_e500 *vcpu_e500;
312 struct kvm_vcpu *vcpu;
313 int err;
314
315 vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
316 if (!vcpu_e500) {
317 err = -ENOMEM;
318 goto out;
319 }
320 vcpu = &vcpu_e500->vcpu;
321
322 /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
323 vcpu->arch.oldpir = 0xffffffff;
324
325 err = kvm_vcpu_init(vcpu, kvm, id);
326 if (err)
327 goto free_vcpu;
328
329 err = kvmppc_e500_tlb_init(vcpu_e500);
330 if (err)
331 goto uninit_vcpu;
332
333 vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
334 if (!vcpu->arch.shared)
335 goto uninit_tlb;
336
337 return vcpu;
338
339uninit_tlb:
340 kvmppc_e500_tlb_uninit(vcpu_e500);
341uninit_vcpu:
342 kvm_vcpu_uninit(vcpu);
343
344free_vcpu:
345 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
346out:
347 return ERR_PTR(err);
348}
349
/*
 * Tear down a vcpu, releasing resources in the reverse order of
 * kvmppc_core_vcpu_create_e500mc: shared page, TLB arrays, generic
 * vcpu state, then the container itself.
 */
static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
359
/*
 * VM init: allocate the VM's base LPID.  Returns 0 on success or the
 * negative error from the LPID allocator.
 */
static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
	int lpid;

	lpid = kvmppc_alloc_lpid();
	if (lpid < 0)
		return lpid;

	/*
	 * Use two lpids per VM on cores with two threads like e6500. Use
	 * even numbers to speedup vcpu lpid computation with consecutive lpids
	 * per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
	 */
	if (threads_per_core == 2)
		lpid <<= 1;

	kvm->arch.lpid = lpid;
	return 0;
}
379
/*
 * VM teardown: return the VM's LPID to the allocator, undoing the
 * dual-thread doubling applied in kvmppc_core_init_vm_e500mc.
 */
static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
	int lpid = kvm->arch.lpid;

	if (threads_per_core == 2)
		lpid >>= 1;

	kvmppc_free_lpid(lpid);
}
389
/*
 * Backend operations table registered with the generic PPC KVM layer;
 * emulation and MMU-destroy hooks are shared with the e500 code.
 */
static struct kvmppc_ops kvm_ops_e500mc = {
	.get_sregs = kvmppc_core_get_sregs_e500mc,
	.set_sregs = kvmppc_core_set_sregs_e500mc,
	.get_one_reg = kvmppc_get_one_reg_e500mc,
	.set_one_reg = kvmppc_set_one_reg_e500mc,
	.vcpu_load   = kvmppc_core_vcpu_load_e500mc,
	.vcpu_put    = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free   = kvmppc_core_vcpu_free_e500mc,
	.mmu_destroy  = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};
406
Scott Wood73196cd2011-12-20 15:34:47 +0000407static int __init kvmppc_e500mc_init(void)
408{
409 int r;
410
411 r = kvmppc_booke_init();
412 if (r)
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +0530413 goto err_out;
Scott Wood73196cd2011-12-20 15:34:47 +0000414
Mihai Caraman188e2672014-09-01 12:01:58 +0300415 /*
416 * Use two lpids per VM on dual threaded processors like e6500
417 * to workarround the lack of tlb write conditional instruction.
418 * Expose half the number of available hardware lpids to the lpid
419 * allocator.
420 */
421 kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
Scott Wood73196cd2011-12-20 15:34:47 +0000422 kvmppc_claim_lpid(0); /* host */
423
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +0530424 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +0530425 if (r)
426 goto err_out;
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +0530427 kvm_ops_e500mc.owner = THIS_MODULE;
428 kvmppc_pr_ops = &kvm_ops_e500mc;
429
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +0530430err_out:
431 return r;
Scott Wood73196cd2011-12-20 15:34:47 +0000432}
433
/*
 * Module exit: deregister the ops pointer before tearing down the
 * common booke support.
 */
static void __exit kvmppc_e500mc_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}
439
module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
/* Bind this module to the generic /dev/kvm misc device for autoloading. */
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");