/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS, guestTID, guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow ID and set up a valid sid mapping in the given entry.
 * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries
 * match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = __this_cpu_inc_return(pcpu_last_used_sid);
	if (sid < NUM_TIDS) {
		__this_cpu_write(pcpu_sids.entry[sid], entry);
		entry->val = sid;
		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if the given entry contains a valid shadow ID mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__this_cpu_write(pcpu_last_used_sid, 0);
	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}

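/* Allocate the per-vcpu guest-to-shadow ID table; returns NULL on failure. */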
static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
	vcpu_e500->idt = NULL;
}

/* Map guest PID to shadow.
 * We use PID to keep the shadow of the current (non-zero) guest PID,
 * and PID1 to keep the shadow of the guest's zero PID, so that guest
 * TLB entries with TID=0 can be accessed at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			struct kvmppc_vcpu_e500 *vcpu_e500,
			int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu, AS, ID, PR) to a physical core shadow ID.
 * This function first looks up whether a valid mapping exists
 * and, if not, creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

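/*
 * Return the shadow TID to use when mapping the given guest TLB entry into
 * the host TLB, based on its TS/TID and the current guest privilege level.
 */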
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe)
{
	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

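/* Guest write to PID0: record the new value and refresh the shadow PIDs. */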
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts;
	int pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case we do a local invalidation of the
		 * specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * we invalidate the entire shadow PID.
		 */
		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
		if (pid <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a 4K entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search the host TLB to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

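/* Invalidate all shadow TLB mappings for this vcpu by dropping its shadow ID mappings. */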
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* Shadow PID may be expired on local core */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

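/* If the guest has been using SPE, disable it before the vcpu is switched out. */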
static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);
#endif

	kvmppc_booke_vcpu_put(vcpu);
}

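/* This KVM backend only runs on e500v2 cores. */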
int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

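/*
 * Seed the guest TLB1 with initial mappings: a large (256M) mapping at
 * address 0 and a 4K mapping for early serial output used by the kernel
 * wrapper.
 */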
static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlb_setup(vcpu_e500);

	/* Registers init */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500V2;

	return 0;
}

static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
			       KVM_SREGS_E_PM;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

	kvmppc_get_sregs_ivor(vcpu, sregs);
	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
	return 0;
}

static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
			sregs->u.e.ivor_high[0];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
			sregs->u.e.ivor_high[1];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
			sregs->u.e.ivor_high[2];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

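/* e500-specific ONE_REG accesses are delegated to the TLB/MMU code. */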
static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

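/*
 * Allocate an e500 vcpu: the common vcpu state, the shadow ID table,
 * the guest TLB state, and the page shared with the guest.
 */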
static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
						     unsigned int id)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_e500) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_e500->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_id;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return vcpu;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
	kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

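/* No per-VM state is needed on e500v2. */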
static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
	return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

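/* Core-specific callbacks, registered through kvmppc_pr_ops in kvmppc_e500_init(). */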
static struct kvmppc_ops kvm_ops_e500 = {
	.get_sregs = kvmppc_core_get_sregs_e500,
	.set_sregs = kvmppc_core_set_sregs_e500,
	.get_one_reg = kvmppc_get_one_reg_e500,
	.set_one_reg = kvmppc_set_one_reg_e500,
	.vcpu_load   = kvmppc_core_vcpu_load_e500,
	.vcpu_put    = kvmppc_core_vcpu_put_e500,
	.vcpu_create = kvmppc_core_vcpu_create_e500,
	.vcpu_free   = kvmppc_core_vcpu_free_e500,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500,
	.destroy_vm = kvmppc_core_destroy_vm_e500,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500_init(void)
{
	int r, i;
	unsigned long ivor[3];
	/* Process remaining handlers above the generic first 16 */
	unsigned long *handler = &kvmppc_booke_handler_addr[16];
	unsigned long handler_len;
	unsigned long max_ivor = 0;

	r = kvmppc_core_check_processor_compat();
	if (r)
		goto err_out;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/* copy extra E500 exception handlers */
	ivor[0] = mfspr(SPRN_IVOR32);
	ivor[1] = mfspr(SPRN_IVOR33);
	ivor[2] = mfspr(SPRN_IVOR34);
	for (i = 0; i < 3; i++) {
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}
	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500;

err_out:
	return r;
}

static void __exit kvmppc_e500_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");