/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock(&vcpu->arch.tbacct_lock);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock(&vcpu->arch.tbacct_lock);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}

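/*
 * Add an entry to the guest's dispatch trace log (DTL), if it has
 * registered one.  The DTL is a ring buffer supplied by the guest:
 * we fill in the next entry, wrap vcpu->arch.dtl_ptr back to the
 * start of the pinned buffer when it reaches the end, and then
 * publish the updated index through the VPA so the guest can see it.
 */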
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = now;
	dt->enqueue_to_dispatch_time = stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
	vcpu->arch.dtl.dirty = true;
}

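/*
 * Handle a guest hypercall that wasn't handled in real mode.
 * Returns RESUME_GUEST if the hcall was handled here (with the result
 * in GPR3), or RESUME_HOST to pass it (or its result) out to userspace.
 */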
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		rc = kvmppc_rtas_hcall(vcpu);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

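/*
 * Decide how to handle an exit from the guest: either fix things up
 * and resume the guest (RESUME_GUEST), hand the page fault over for
 * resolution (RESUME_PAGE_FAULT), or go back out to the host kernel
 * and userspace (RESUME_HOST).
 */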
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* clear the whole structure before filling in the fields we use */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;

	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

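/*
 * Get/set one register via the KVM_{GET,SET}_ONE_REG ioctls.
 * Registers that this HV-specific code doesn't handle are left to the
 * generic book3s code, signalled by returning -EINVAL.
 */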
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			val->vsxval[0] = vcpu->arch.vsr[2 * i];
			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			vcpu->arch.vsr[2 * i] = val->vsxval[0];
			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}

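/*
 * Create and initialize a vcpu for this VM, allocating the virtual
 * core (vcore) structure for its core number the first time a vcpu
 * in that core is created.
 */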
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = TB_NIL;
		}
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

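/*
 * Arm a hrtimer to fire when this vcpu's decrementer would expire,
 * so a ceded vcpu can be woken at the right time; if the decrementer
 * has already gone negative, queue the exception immediately instead.
 */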
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		/ tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}

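/*
 * Claim a hardware thread for KVM's use: tell it not to go into the
 * kernel if it wakes, and wait for it to get back to nap mode if it
 * is currently executing in the kernel.
 */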
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

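/*
 * Point a physical thread's PACA at this vcpu and its vcore, and, if
 * it is a secondary thread, send it an IPI so it wakes from nap and
 * enters the guest.
 */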
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_core; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		goto out;	/* nothing to run; should never happen */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);

	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

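/*
 * Make this vcpu runnable in its virtual core and run it: either join
 * a vcore that is already executing, or act as the "runner" task that
 * calls kvmppc_run_core() on behalf of all the runnable vcpus in the
 * vcore.  Returns when the vcpu stops being runnable (e.g. on a signal
 * or an exit that needs to go out to userspace).
 */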
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

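/*
 * Entry point for running a vcpu from the KVM_RUN ioctl.  Sets up the
 * hashed page table and RMA/VRMA the first time through, then runs the
 * vcpu, servicing hypercalls and guest page faults until something
 * needs to go out to userspace.
 */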
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001390int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
1391{
1392 int r;
Paul Mackerras913d3ff9a2012-10-15 01:16:48 +00001393 int srcu_idx;
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001394
Alexander Grafaf8f38b2011-08-10 13:57:08 +02001395 if (!vcpu->arch.sane) {
1396 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1397 return -EINVAL;
1398 }
1399
Scott Wood25051b52011-11-08 18:23:23 -06001400 kvmppc_core_prepare_to_enter(vcpu);
1401
Paul Mackerras19ccb762011-07-23 17:42:46 +10001402 /* No need to go into the guest when all we'll do is come back out */
1403 if (signal_pending(current)) {
1404 run->exit_reason = KVM_EXIT_INTR;
1405 return -EINTR;
1406 }
1407
Paul Mackerras32fad282012-05-04 02:32:53 +00001408 atomic_inc(&vcpu->kvm->arch.vcpus_running);
1409 /* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
1410 smp_mb();
1411
1412 /* On the first time here, set up HTAB and VRMA or RMA */
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001413 if (!vcpu->kvm->arch.rma_setup_done) {
Paul Mackerras32fad282012-05-04 02:32:53 +00001414 r = kvmppc_hv_setup_htab_rma(vcpu);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001415 if (r)
Paul Mackerras32fad282012-05-04 02:32:53 +00001416 goto out;
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001417 }
Paul Mackerras19ccb762011-07-23 17:42:46 +10001418
1419 flush_fp_to_thread(current);
1420 flush_altivec_to_thread(current);
1421 flush_vsx_to_thread(current);
1422 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
Paul Mackerras342d3db2011-12-12 12:38:05 +00001423 vcpu->arch.pgdir = current->mm->pgd;
Paul Mackerrasc7b67672012-10-15 01:18:07 +00001424 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
Paul Mackerras19ccb762011-07-23 17:42:46 +10001425
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001426 do {
1427 r = kvmppc_run_vcpu(run, vcpu);
1428
1429 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
1430 !(vcpu->arch.shregs.msr & MSR_PR)) {
1431 r = kvmppc_pseries_do_hcall(vcpu);
Scott Wood7e28e60e2011-11-08 18:23:20 -06001432 kvmppc_core_prepare_to_enter(vcpu);
Paul Mackerras913d3ff9a2012-10-15 01:16:48 +00001433 } else if (r == RESUME_PAGE_FAULT) {
1434 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1435 r = kvmppc_book3s_hv_page_fault(run, vcpu,
1436 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
1437 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001438 }
1439 } while (r == RESUME_GUEST);
Paul Mackerras32fad282012-05-04 02:32:53 +00001440
1441 out:
Paul Mackerrasc7b67672012-10-15 01:18:07 +00001442 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
Paul Mackerras32fad282012-05-04 02:32:53 +00001443 atomic_dec(&vcpu->kvm->arch.vcpus_running);
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001444 return r;
1445}
1446
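/*
 * Hedged userspace-side sketch (illustrative only, not built as part of
 * this file): a minimal VMM loop driving the vcpu fd whose KVM_RUN path
 * lands in kvmppc_vcpu_run() above.  The fd variable names and the
 * "reply with success" hcall handling are assumptions; KVM_RUN,
 * KVM_GET_VCPU_MMAP_SIZE and the exit reasons are the standard KVM ABI.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int drive_vcpu(int kvm_fd, int vcpu_fd)
{
	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (mmap_size < 0)
		return -1;
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_INTR:
			/* signal_pending() path above; retry once the
			 * signal has been dealt with */
			continue;
		case KVM_EXIT_PAPR_HCALL:
			/* hypercall punted to userspace; run->papr_hcall.nr
			 * and .args describe it, .ret carries the reply */
			run->papr_hcall.ret = 0;	/* 0 == H_SUCCESS */
			continue;
		default:
			fprintf(stderr, "unhandled exit %u\n",
				run->exit_reason);
			return -1;
		}
	}
}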
David Gibson54738c02011-06-29 00:22:41 +00001447
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001448/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Paul Mackerras9e368f22011-06-29 00:40:08 +00001449 Assumes POWER7 or PPC970. */
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001450static inline int lpcr_rmls(unsigned long rma_size)
1451{
1452 switch (rma_size) {
1453 case 32ul << 20: /* 32 MB */
Paul Mackerras9e368f22011-06-29 00:40:08 +00001454 if (cpu_has_feature(CPU_FTR_ARCH_206))
1455 return 8; /* only supported on POWER7 */
1456 return -1;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001457 case 64ul << 20: /* 64 MB */
1458 return 3;
1459 case 128ul << 20: /* 128 MB */
1460 return 7;
1461 case 256ul << 20: /* 256 MB */
1462 return 4;
1463 case 1ul << 30: /* 1 GB */
1464 return 2;
1465 case 16ul << 30: /* 16 GB */
1466 return 1;
1467 case 256ul << 30: /* 256 GB */
1468 return 0;
1469 default:
1470 return -1;
1471 }
1472}
1473
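/*
 * Illustrative sketch only (nothing in this file calls it): given an upper
 * bound on contiguous real memory available for an RMA, pick the largest
 * size that lpcr_rmls() above can encode.  The candidate list simply
 * mirrors the switch statement above; the helper name is an assumption.
 */
static inline unsigned long largest_rma_size(unsigned long limit)
{
	static const unsigned long sizes[] = {
		256ul << 30, 16ul << 30, 1ul << 30, 256ul << 20,
		128ul << 20, 64ul << 20, 32ul << 20,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(sizes); ++i)
		if (sizes[i] <= limit && lpcr_rmls(sizes[i]) >= 0)
			return sizes[i];
	return 0;	/* no usable RMA size below the limit */
}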
1474static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1475{
Alexander Grafb4e70612012-01-16 16:50:10 +01001476 struct kvmppc_linear_info *ri = vma->vm_file->private_data;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001477 struct page *page;
1478
1479 if (vmf->pgoff >= ri->npages)
1480 return VM_FAULT_SIGBUS;
1481
1482 page = pfn_to_page(ri->base_pfn + vmf->pgoff);
1483 get_page(page);
1484 vmf->page = page;
1485 return 0;
1486}
1487
1488static const struct vm_operations_struct kvm_rma_vm_ops = {
1489 .fault = kvm_rma_fault,
1490};
1491
1492static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
1493{
Konstantin Khlebnikov314e51b2012-10-08 16:29:02 -07001494 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001495 vma->vm_ops = &kvm_rma_vm_ops;
1496 return 0;
1497}
1498
1499static int kvm_rma_release(struct inode *inode, struct file *filp)
1500{
Alexander Grafb4e70612012-01-16 16:50:10 +01001501 struct kvmppc_linear_info *ri = filp->private_data;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001502
1503 kvm_release_rma(ri);
1504 return 0;
1505}
1506
1507static struct file_operations kvm_rma_fops = {
1508 .mmap = kvm_rma_mmap,
1509 .release = kvm_rma_release,
1510};
1511
1512long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
1513{
Alexander Grafb4e70612012-01-16 16:50:10 +01001514 struct kvmppc_linear_info *ri;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001515 long fd;
1516
1517 ri = kvm_alloc_rma();
1518 if (!ri)
1519 return -ENOMEM;
1520
1521 fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
1522 if (fd < 0)
1523 kvm_release_rma(ri);
1524
1525 ret->rma_size = ri->npages << PAGE_SHIFT;
1526 return fd;
1527}
1528
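/*
 * Hedged userspace-side sketch (illustrative only, not built as part of
 * this file): how a VMM might drive the KVM_ALLOCATE_RMA ioctl implemented
 * above and hand the region to the guest as its memory at guest physical
 * address 0, which is what kvmppc_hv_setup_htab_rma() below looks for.
 * Variable names and the slot number are assumptions; the ioctl,
 * struct kvm_allocate_rma and struct kvm_userspace_memory_region are the
 * standard KVM ABI.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void *map_rma_at_gpa0(int vm_fd, unsigned long *size_out)
{
	struct kvm_allocate_rma rma;
	struct kvm_userspace_memory_region mem;
	void *addr;
	int rma_fd;

	rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);	/* fills rma.rma_size */
	if (rma_fd < 0)
		return NULL;

	addr = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, rma_fd, 0);
	if (addr == MAP_FAILED)
		return NULL;

	mem.slot = 0;				/* assumption: first slot */
	mem.flags = 0;
	mem.guest_phys_addr = 0;		/* RMA must back gpa 0 */
	mem.memory_size = rma.rma_size;
	mem.userspace_addr = (unsigned long)addr;
	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
		return NULL;

	*size_out = rma.rma_size;
	return addr;
}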
Benjamin Herrenschmidt5b747162012-04-26 19:43:42 +00001529static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
1530 int linux_psize)
1531{
1532 struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
1533
1534 if (!def->shift)
1535 return;
1536 (*sps)->page_shift = def->shift;
1537 (*sps)->slb_enc = def->sllp;
1538 (*sps)->enc[0].page_shift = def->shift;
1539 (*sps)->enc[0].pte_enc = def->penc;
1540 (*sps)++;
1541}
1542
1543int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
1544{
1545 struct kvm_ppc_one_seg_page_size *sps;
1546
1547 info->flags = KVM_PPC_PAGE_SIZES_REAL;
1548 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1549 info->flags |= KVM_PPC_1T_SEGMENTS;
1550 info->slb_size = mmu_slb_size;
1551
1552 /* We only support these sizes for now, and no multi-size segments */
1553 sps = &info->sps[0];
1554 kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
1555 kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
1556 kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
1557
1558 return 0;
1559}
1560
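/*
 * Hedged userspace-side sketch (illustrative only, not built as part of
 * this file): querying the segment/page-size information filled in by the
 * handler above.  Only the fields visibly set above (flags, slb_size,
 * sps[].page_shift, slb_enc, enc[0]) are touched; the printing and the
 * "stop at the first zero entry" loop bound are assumptions.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_smmu_info(int vm_fd)
{
	struct kvm_ppc_smmu_info info;
	int i;

	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		return;

	printf("slb_size %u, 1T segments %s\n", info.slb_size,
	       (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");
	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		if (!info.sps[i].page_shift)
			break;		/* assumed: unused entries stay zero */
		printf("  base page shift %u, slb_enc 0x%x, pte_enc 0x%x\n",
		       info.sps[i].page_shift, info.sps[i].slb_enc,
		       info.sps[i].enc[0].pte_enc);
	}
}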
Paul Mackerras82ed3612011-12-15 02:03:22 +00001561/*
1562 * Get (and clear) the dirty memory log for a memory slot.
1563 */
1564int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1565{
1566 struct kvm_memory_slot *memslot;
1567 int r;
1568 unsigned long n;
1569
1570 mutex_lock(&kvm->slots_lock);
1571
1572 r = -EINVAL;
Alex Williamsonbbacc0c2012-12-10 10:33:09 -07001573 if (log->slot >= KVM_USER_MEM_SLOTS)
Paul Mackerras82ed3612011-12-15 02:03:22 +00001574 goto out;
1575
1576 memslot = id_to_memslot(kvm->memslots, log->slot);
1577 r = -ENOENT;
1578 if (!memslot->dirty_bitmap)
1579 goto out;
1580
1581 n = kvm_dirty_bitmap_bytes(memslot);
1582 memset(memslot->dirty_bitmap, 0, n);
1583
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001584 r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
Paul Mackerras82ed3612011-12-15 02:03:22 +00001585 if (r)
1586 goto out;
1587
1588 r = -EFAULT;
1589 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1590 goto out;
1591
1592 r = 0;
1593out:
1594 mutex_unlock(&kvm->slots_lock);
1595 return r;
1596}
1597
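/*
 * Hedged userspace-side sketch (illustrative only, not built as part of
 * this file): fetching the dirty bitmap that the handler above copies out.
 * One bit per guest page; the buffer sizing helper and slot number handling
 * are assumptions, the ioctl and struct kvm_dirty_log are standard KVM ABI.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static unsigned long *get_dirty_bitmap(int vm_fd, int slot,
					unsigned long npages)
{
	struct kvm_dirty_log log;
	size_t len = ((npages + 63) / 64) * 8;	/* bits -> 64-bit words */
	unsigned long *bitmap = calloc(1, len);

	if (!bitmap)
		return NULL;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;		/* caller frees; bit N set => page N dirty */
}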
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001598static void unpin_slot(struct kvm_memory_slot *memslot)
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001599{
1600 unsigned long *physp;
1601 unsigned long j, npages, pfn;
1602 struct page *page;
1603
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001604 physp = memslot->arch.slot_phys;
1605 npages = memslot->npages;
1606 if (!physp)
1607 return;
1608 for (j = 0; j < npages; j++) {
1609 if (!(physp[j] & KVMPPC_GOT_PAGE))
1610 continue;
1611 pfn = physp[j] >> PAGE_SHIFT;
1612 page = pfn_to_page(pfn);
1613 SetPageDirty(page);
1614 put_page(page);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001615 }
1616}
1617
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001618void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
1619 struct kvm_memory_slot *dont)
1620{
1621 if (!dont || free->arch.rmap != dont->arch.rmap) {
1622 vfree(free->arch.rmap);
1623 free->arch.rmap = NULL;
1624 }
1625 if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
1626 unpin_slot(free);
1627 vfree(free->arch.slot_phys);
1628 free->arch.slot_phys = NULL;
1629 }
1630}
1631
1632int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
1633 unsigned long npages)
1634{
1635 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
1636 if (!slot->arch.rmap)
1637 return -ENOMEM;
1638 slot->arch.slot_phys = NULL;
1639
1640 return 0;
1641}
1642
1643int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1644 struct kvm_memory_slot *memslot,
1645 struct kvm_userspace_memory_region *mem)
1646{
1647 unsigned long *phys;
1648
1649 /* Allocate a slot_phys array if needed */
1650 phys = memslot->arch.slot_phys;
1651 if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
1652 phys = vzalloc(memslot->npages * sizeof(unsigned long));
1653 if (!phys)
1654 return -ENOMEM;
1655 memslot->arch.slot_phys = phys;
1656 }
1657
1658 return 0;
1659}
1660
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001661void kvmppc_core_commit_memory_region(struct kvm *kvm,
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001662 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001663 const struct kvm_memory_slot *old)
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001664{
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001665 unsigned long npages = mem->memory_size >> PAGE_SHIFT;
1666 struct kvm_memory_slot *memslot;
1667
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001668 if (npages && old->npages) {
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001669 /*
1670 * If modifying a memslot, reset all the rmap dirty bits.
1671 * If this is a new memslot, we don't need to do anything
1672 * since the rmap array starts out as all zeroes,
1673 * i.e. no pages are dirty.
1674 */
1675 memslot = id_to_memslot(kvm->memslots, mem->slot);
1676 kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
1677 }
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001678}
1679
Paul Mackerras32fad282012-05-04 02:32:53 +00001680static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001681{
1682 int err = 0;
1683 struct kvm *kvm = vcpu->kvm;
Alexander Grafb4e70612012-01-16 16:50:10 +01001684 struct kvmppc_linear_info *ri = NULL;
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001685 unsigned long hva;
1686 struct kvm_memory_slot *memslot;
1687 struct vm_area_struct *vma;
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001688 unsigned long lpcr, senc;
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001689 unsigned long psize, porder;
1690 unsigned long rma_size;
1691 unsigned long rmls;
1692 unsigned long *physp;
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001693 unsigned long i, npages;
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001694 int srcu_idx;
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001695
1696 mutex_lock(&kvm->lock);
1697 if (kvm->arch.rma_setup_done)
1698 goto out; /* another vcpu beat us to it */
1699
Paul Mackerras32fad282012-05-04 02:32:53 +00001700 /* Allocate hashed page table (if not done already) and reset it */
1701 if (!kvm->arch.hpt_virt) {
1702 err = kvmppc_alloc_hpt(kvm, NULL);
1703 if (err) {
1704 pr_err("KVM: Couldn't alloc HPT\n");
1705 goto out;
1706 }
1707 }
1708
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001709 /* Look up the memslot for guest physical address 0 */
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001710 srcu_idx = srcu_read_lock(&kvm->srcu);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001711 memslot = gfn_to_memslot(kvm, 0);
1712
1713 /* We must have some memory at 0 by now */
1714 err = -EINVAL;
1715 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001716 goto out_srcu;
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001717
1718 /* Look up the VMA for the start of this memory slot */
1719 hva = memslot->userspace_addr;
1720 down_read(&current->mm->mmap_sem);
1721 vma = find_vma(current->mm, hva);
1722 if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
1723 goto up_out;
1724
1725 psize = vma_kernel_pagesize(vma);
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001726 porder = __ilog2(psize);
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001727
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001728 /* Is this one of our preallocated RMAs? */
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001729 if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
1730 hva == vma->vm_start)
1731 ri = vma->vm_file->private_data;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001732
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001733 up_read(&current->mm->mmap_sem);
1734
1735 if (!ri) {
1736 /* On POWER7, use VRMA; on PPC970, give up */
1737 err = -EPERM;
1738 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1739 pr_err("KVM: CPU requires an RMO\n");
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001740 goto out_srcu;
Paul Mackerras9e368f22011-06-29 00:40:08 +00001741 }
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001742
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001743 /* We can handle 4k, 64k or 16M pages in the VRMA */
1744 err = -EINVAL;
1745 if (!(psize == 0x1000 || psize == 0x10000 ||
1746 psize == 0x1000000))
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001747 goto out_srcu;
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001748
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001749 /* Update VRMASD field in the LPCR */
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001750 senc = slb_pgsize_encoding(psize);
Paul Mackerras697d3892011-12-12 12:36:37 +00001751 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1752 (VRMA_VSID << SLB_VSID_SHIFT_1T);
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001753 lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
1754 lpcr |= senc << (LPCR_VRMASD_SH - 4);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001755 kvm->arch.lpcr = lpcr;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001756
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001757 /* Create HPTEs in the hash page table for the VRMA */
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001758 kvmppc_map_vrma(vcpu, memslot, porder);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001759
1760 } else {
1761 /* Set up to use an RMO region */
1762 rma_size = ri->npages;
1763 if (rma_size > memslot->npages)
1764 rma_size = memslot->npages;
1765 rma_size <<= PAGE_SHIFT;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001766 rmls = lpcr_rmls(rma_size);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001767 err = -EINVAL;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001768 if ((long)rmls < 0) {
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001769 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001770 goto out_srcu;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001771 }
1772 atomic_inc(&ri->use_count);
1773 kvm->arch.rma = ri;
Paul Mackerras9e368f22011-06-29 00:40:08 +00001774
1775 /* Update LPCR and RMOR */
1776 lpcr = kvm->arch.lpcr;
1777 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1778 /* PPC970; insert RMLS value (split field) in HID4 */
1779 lpcr &= ~((1ul << HID4_RMLS0_SH) |
1780 (3ul << HID4_RMLS2_SH));
1781 lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
1782 ((rmls & 3) << HID4_RMLS2_SH);
1783 /* RMOR is also in HID4 */
1784 lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
1785 << HID4_RMOR_SH;
1786 } else {
1787 /* POWER7 */
1788 lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
1789 lpcr |= rmls << LPCR_RMLS_SH;
1790 kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
1791 }
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001792 kvm->arch.lpcr = lpcr;
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001793 pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001794 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001795
1796 /* Initialize phys addrs of pages in RMO */
Paul Mackerrasda9d1d72011-12-12 12:31:41 +00001797 npages = ri->npages;
1798 porder = __ilog2(npages);
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001799 physp = memslot->arch.slot_phys;
1800 if (physp) {
1801 if (npages > memslot->npages)
1802 npages = memslot->npages;
1803 spin_lock(&kvm->arch.slot_phys_lock);
1804 for (i = 0; i < npages; ++i)
1805 physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
1806 porder;
1807 spin_unlock(&kvm->arch.slot_phys_lock);
1808 }
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001809 }
1810
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001811 /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
1812 smp_wmb();
1813 kvm->arch.rma_setup_done = 1;
1814 err = 0;
Paul Mackerras2c9097e2012-09-11 13:27:01 +00001815 out_srcu:
1816 srcu_read_unlock(&kvm->srcu, srcu_idx);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001817 out:
1818 mutex_unlock(&kvm->lock);
1819 return err;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001820
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001821 up_out:
1822 up_read(&current->mm->mmap_sem);
1823 goto out;
Paul Mackerrasde56a942011-06-29 00:21:34 +00001824}
1825
1826int kvmppc_core_init_vm(struct kvm *kvm)
1827{
Paul Mackerras32fad282012-05-04 02:32:53 +00001828 unsigned long lpcr, lpid;
Paul Mackerrasde56a942011-06-29 00:21:34 +00001829
Paul Mackerras32fad282012-05-04 02:32:53 +00001830 /* Allocate the guest's logical partition ID */
1831
1832 lpid = kvmppc_alloc_lpid();
1833 if ((long)lpid < 0)
1834 return -ENOMEM;
1835 kvm->arch.lpid = lpid;
Paul Mackerrasde56a942011-06-29 00:21:34 +00001836
Paul Mackerras1b400ba2012-11-21 23:28:08 +00001837 /*
1838 * Since we don't flush the TLB when tearing down a VM,
1839 * and this lpid might have previously been used,
1840 * make sure we flush on each core before running the new VM.
1841 */
1842 cpumask_setall(&kvm->arch.need_tlb_flush);
1843
David Gibson54738c02011-06-29 00:22:41 +00001844 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
Michael Ellerman8e591cb2013-04-17 20:30:00 +00001845 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001846
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001847 kvm->arch.rma = NULL;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001848
Paul Mackerras9e368f22011-06-29 00:40:08 +00001849 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001850
Paul Mackerras9e368f22011-06-29 00:40:08 +00001851 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1852 /* PPC970; HID4 is effectively the LPCR */
Paul Mackerras9e368f22011-06-29 00:40:08 +00001853 kvm->arch.host_lpid = 0;
1854 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
1855 lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
1856 lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
1857 ((lpid & 0xf) << HID4_LPID5_SH);
1858 } else {
1859 /* POWER7; init LPCR for virtual RMA mode */
1860 kvm->arch.host_lpid = mfspr(SPRN_LPID);
1861 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
1862 lpcr &= LPCR_PECE | LPCR_LPES;
1863 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
Paul Mackerras697d3892011-12-12 12:36:37 +00001864 LPCR_VPM0 | LPCR_VPM1;
1865 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
1866 (VRMA_VSID << SLB_VSID_SHIFT_1T);
Paul Mackerras9e368f22011-06-29 00:40:08 +00001867 }
1868 kvm->arch.lpcr = lpcr;
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001869
Paul Mackerras342d3db2011-12-12 12:38:05 +00001870 kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
Paul Mackerrasc77162d2011-12-12 12:31:00 +00001871 spin_lock_init(&kvm->arch.slot_phys_lock);
Paul Mackerras512691d2012-10-15 01:15:41 +00001872
1873 /*
1874 * Don't allow secondary CPU threads to come online
1875 * while any KVM VMs exist.
1876 */
1877 inhibit_secondary_onlining();
1878
David Gibson54738c02011-06-29 00:22:41 +00001879 return 0;
Paul Mackerrasde56a942011-06-29 00:21:34 +00001880}
1881
1882void kvmppc_core_destroy_vm(struct kvm *kvm)
1883{
Paul Mackerras512691d2012-10-15 01:15:41 +00001884 uninhibit_secondary_onlining();
1885
Paul Mackerrasaa04b4c2011-06-29 00:25:44 +00001886 if (kvm->arch.rma) {
1887 kvm_release_rma(kvm->arch.rma);
1888 kvm->arch.rma = NULL;
1889 }
1890
Michael Ellerman8e591cb2013-04-17 20:30:00 +00001891 kvmppc_rtas_tokens_free(kvm);
1892
Paul Mackerrasde56a942011-06-29 00:21:34 +00001893 kvmppc_free_hpt(kvm);
David Gibson54738c02011-06-29 00:22:41 +00001894 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
Paul Mackerrasde56a942011-06-29 00:21:34 +00001895}
1896
1897/* These are stubs for now */
1898void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
1899{
1900}
1901
1902/* We don't need to emulate any privileged instructions or dcbz */
1903int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
1904 unsigned int inst, int *advance)
1905{
1906 return EMULATE_FAIL;
1907}
1908
Alexander Graf54771e62012-05-04 14:55:12 +02001909int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001910{
1911 return EMULATE_FAIL;
1912}
1913
Alexander Graf54771e62012-05-04 14:55:12 +02001914int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001915{
1916 return EMULATE_FAIL;
1917}
1918
1919static int kvmppc_book3s_hv_init(void)
1920{
1921 int r;
1922
1923 r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1924
1925 if (r)
1926 return r;
1927
1928 r = kvmppc_mmu_hv_init();
1929
1930 return r;
1931}
1932
1933static void kvmppc_book3s_hv_exit(void)
1934{
1935 kvm_exit();
1936}
1937
1938module_init(kvmppc_book3s_hv_init);
1939module_exit(kvmppc_book3s_hv_exit);