/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vc;
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->stolen_tb += mftb() - vc->preempt_tb;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};
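
/*
 * Calling convention, as wired up in kvmppc_pseries_do_hcall() below:
 * for H_REGISTER_VPA the guest passes flags in r4, the target vcpu id
 * in r5 and the buffer's logical address in r6.  The subfunction
 * (register/deregister VPA, DTL or SLB shadow buffer) is encoded in
 * the flags word via H_VPA_FUNC_SHIFT/H_VPA_FUNC_MASK.
 */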

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
{
	void *va;
	unsigned long nb;

	vpap->update_pending = 0;
	va = NULL;
	if (vpap->next_gpa) {
		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
		if (nb < vpap->len) {
			/*
			 * If it's now too short, it must be that userspace
			 * has changed the mappings underlying guest memory,
			 * so unregister the region.
			 */
			kvmppc_unpin_guest_page(kvm, va);
			va = NULL;
		}
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
	vpap->pinned_addr = va;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

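/*
 * Append an entry to this vcpu's dispatch trace log.  The DTL is a
 * guest-visible ring buffer: entries are written at dtl_ptr, which wraps
 * from pinned_end back to pinned_addr, while vpa->dtl_idx counts entries
 * monotonically so the guest can detect overruns.
 */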
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long old_stolen;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	old_stolen = vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = vc->stolen_tb;
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = mftb();
	dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_ENTER:
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				kvmppc_get_pc(vcpu), 0);
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* Clear the structure first, so that pvr isn't wiped by the memset */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = put_user(0, (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
	{
		u64 hior;
		/* Only allow this to be set to zero */
		r = get_user(hior, (u64 __user *)reg->addr);
		if (!r && (hior != 0))
			r = -EINVAL;
		break;
	}
	default:
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = mftb();
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;
	vcpu->arch.stolen_logged = vcore->stolen_tb;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.dtl.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
	if (vcpu->arch.slb_shadow.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
	if (vcpu->arch.vpa.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
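	/* Convert the remaining timebase ticks to nanoseconds for the hrtimer */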
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		/ tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	++vc->n_busy;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}

static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		kvmppc_grab_hwthread(cpu);
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

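/*
 * Spin, at low HMT priority and with a bounded iteration count, until
 * all the woken secondary threads have exited the guest and bumped
 * vc->nap_count on their way back to nap mode.
 */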
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		return 0;		/* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->stolen_tb += mftb() - vc->preempt_tb;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			kvmppc_update_vpas(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}
	/* Grab any remaining hw threads so they can't go into the kernel */
	for (i = ptid; i < threads_per_core; ++i)
		kvmppc_grab_hwthread(vc->pcpu + i);

	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();
	__kvmppc_vcore_entry(NULL, vcpu0);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_state = VCORE_INACTIVE;
	vc->preempt_tb = mftb();
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);
	struct kvm_vcpu *v;
	int all_idle = 1;

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
			all_idle = 0;
			break;
		}
	}
	if (all_idle)
		schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	int prev_state;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
		}

	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)
		--vc->n_busy;

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);

		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		vc->runner = NULL;
	}

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		}
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;
		}
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* On the first time here, set up VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_rma(vcpu);
		if (r)
			return r;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		}
	} while (r == RESUME_GUEST);
	return r;
}

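/*
 * Number of host pages needed for the TCE table of a DMA window: one
 * u64 TCE per (1 << SPAPR_TCE_SHIFT) bytes of window.  Illustrative
 * arithmetic, assuming the usual 4 kB TCE granule: a 256 MB window
 * needs (256 MB >> 12) * 8 = 512 kB of table.
 */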
static long kvmppc_stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
	struct kvm *kvm = stt->kvm;
	int i;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	kfree(stt);
	mutex_unlock(&kvm->lock);

	kvm_put_kvm(kvm);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);
	return 0;
}

static struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};

long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);

	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}

/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
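
/*
 * Worked example (illustrative): a 128 MB RMA yields RMLS = 7, which
 * kvmppc_hv_setup_rma() inserts into LPCR[RMLS] on POWER7 or into the
 * split RMLS field of HID4 on PPC970; any size not listed above gives
 * -1 and the RMA is rejected.
 */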

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap = kvm_rma_mmap,
	.release = kvm_rma_release,
};

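/*
 * Allocate one of the preallocated RMAs and hand it to userspace as an
 * anonymous fd.  Userspace is expected to mmap() it (pages come in via
 * kvm_rma_fault() above) and register it as the memslot at guest
 * physical address 0, where kvmppc_hv_setup_rma() recognizes it by its
 * file operations.
 */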
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
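
/*
 * For reference, the encodings produced above: psize 0x1000 (4k) maps
 * to 0, 0x10000 (64k) to SLB_VSID_L | SLB_VSID_LP_01, and 0x1000000
 * (16M) to SLB_VSID_L alone.
 */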

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
	unsigned long npages;
	unsigned long *phys;

	/* Allocate a slot_phys array */
	phys = kvm->arch.slot_phys[mem->slot];
	if (!kvm->arch.using_mmu_notifiers && !phys) {
		npages = mem->memory_size >> PAGE_SHIFT;
		phys = vzalloc(npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		kvm->arch.slot_phys[mem->slot] = phys;
		kvm->arch.slot_npages[mem->slot] = npages;
	}

	return 0;
}

static void unpin_slot(struct kvm *kvm, int slot_id)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = kvm->arch.slot_phys[slot_id];
	npages = kvm->arch.slot_npages[slot_id];
	if (physp) {
		spin_lock(&kvm->arch.slot_phys_lock);
		for (j = 0; j < npages; j++) {
			if (!(physp[j] & KVMPPC_GOT_PAGE))
				continue;
			pfn = physp[j] >> PAGE_SHIFT;
			page = pfn_to_page(pfn);
			if (PageHuge(page))
				page = compound_head(page);
			SetPageDirty(page);
			put_page(page);
		}
		kvm->arch.slot_phys[slot_id] = NULL;
		spin_unlock(&kvm->arch.slot_phys_lock);
		vfree(physp);
	}
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
}

static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;		/* signed: lpcr_rmls() returns -1 on error */
	unsigned long *physp;
	unsigned long i, npages;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Look up the memslot for guest physical address 0 */
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = kvm->arch.slot_phys[memslot->id];
		spin_lock(&kvm->arch.slot_phys_lock);
		for (i = 0; i < npages; ++i)
			physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
		spin_unlock(&kvm->arch.slot_phys_lock);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out;
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	unsigned long i;

	if (!kvm->arch.using_mmu_notifiers)
		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
			unpin_slot(kvm, i);

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);