/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include "book3s.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

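/*
 * Bitmap with one bit per possible hcall number divided by 4 (hcall
 * opcodes are multiples of 4).  It names the hcalls enabled for
 * in-kernel handling by default; the per-VM kvm->arch.enabled_hcalls
 * bitmap tested in kvmppc_pseries_do_hcall() below is presumably
 * initialised from this default set (that code is not in this excerpt).
 */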
static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();

	/* CPU points to the first thread of the core */
	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
#ifdef CONFIG_PPC_ICP_NATIVE
		int real_cpu = cpu + vcpu->arch.ptid;
		if (paca[real_cpu].kvm_hstate.xics_phys)
			xics_wake_cpu(real_cpu);
		else
#endif
		if (cpu_online(cpu))
			smp_send_reschedule(cpu);
	}
	put_cpu();
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */

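/*
 * A rough example of the scheme above (illustrative only): if the task
 * running the vcore is preempted at timebase value t1, vcpu_put sets
 * vc->preempt_tb = t1; when it is loaded again at t2, vcpu_load adds
 * t2 - t1 to vc->stolen_tb and resets preempt_tb to TB_NIL.  The
 * busy_preempt/busy_stolen pair works the same way for a vcpu in the
 * BUSY_IN_HOST state.
 */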
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long pcr = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (arch_compat) {
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return -EINVAL;	/* 970 has no compat mode support */

		switch (arch_compat) {
		case PVR_ARCH_205:
			/*
			 * If an arch bit is set in PCR, all the defined
			 * higher-order arch bits also have to be set.
			 */
			pcr = PCR_ARCH_206 | PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			pcr = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			break;
		default:
			return -EINVAL;
		}

		if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
			/* POWER7 can't emulate POWER8 */
			if (!(pcr & PCR_ARCH_206))
				return -EINVAL;
			pcr &= ~PCR_ARCH_206;
		}
	}

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	vc->pcr = pcr;
	spin_unlock(&vc->lock);

	return 0;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = 1;
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};
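/*
 * The 4-byte 'dummy' field above places 'length' at offset 4, matching
 * the comment preceding the struct; do_h_register_vpa() reads it as a
 * 16-bit value when registering a VPA and as a 32-bit value for the DTL
 * and SLB shadow buffers.
 */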

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

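/*
 * Handle the H_REGISTER_VPA hcall (called from kvmppc_pseries_do_hcall):
 * decode the sub-function from the flags, validate the guest-supplied
 * address and length, and record the new area as a pending update on the
 * target vcpu.  The actual (re)pinning of the area is done later by
 * kvmppc_update_vpas()/kvmppc_update_vpa().
 */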
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock_irq(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}

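/*
 * Write one dispatch trace log entry for this vcpu, charging it with the
 * stolen time (vcore plus busy) accumulated since the last entry, then
 * advance the ring pointer and publish the new dtl_idx to the guest's
 * VPA after a write barrier.
 */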
Paul Mackerras | 0456ec4 | 2012-02-03 00:56:21 +0000 | [diff] [blame] | 522 | static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, |
| 523 | struct kvmppc_vcore *vc) |
| 524 | { |
| 525 | struct dtl_entry *dt; |
| 526 | struct lppaca *vpa; |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 527 | unsigned long stolen; |
| 528 | unsigned long core_stolen; |
| 529 | u64 now; |
Paul Mackerras | 0456ec4 | 2012-02-03 00:56:21 +0000 | [diff] [blame] | 530 | |
| 531 | dt = vcpu->arch.dtl_ptr; |
| 532 | vpa = vcpu->arch.vpa.pinned_addr; |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 533 | now = mftb(); |
| 534 | core_stolen = vcore_stolen_time(vc, now); |
| 535 | stolen = core_stolen - vcpu->arch.stolen_logged; |
| 536 | vcpu->arch.stolen_logged = core_stolen; |
Paul Mackerras | bf3d32e | 2013-11-16 17:46:04 +1100 | [diff] [blame] | 537 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 538 | stolen += vcpu->arch.busy_stolen; |
| 539 | vcpu->arch.busy_stolen = 0; |
Paul Mackerras | bf3d32e | 2013-11-16 17:46:04 +1100 | [diff] [blame] | 540 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
Paul Mackerras | 0456ec4 | 2012-02-03 00:56:21 +0000 | [diff] [blame] | 541 | if (!dt || !vpa) |
| 542 | return; |
| 543 | memset(dt, 0, sizeof(struct dtl_entry)); |
| 544 | dt->dispatch_reason = 7; |
| 545 | dt->processor_id = vc->pcpu + vcpu->arch.ptid; |
Paul Mackerras | 93b0f4d | 2013-09-06 13:17:46 +1000 | [diff] [blame] | 546 | dt->timebase = now + vc->tb_offset; |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 547 | dt->enqueue_to_dispatch_time = stolen; |
Paul Mackerras | 0456ec4 | 2012-02-03 00:56:21 +0000 | [diff] [blame] | 548 | dt->srr0 = kvmppc_get_pc(vcpu); |
| 549 | dt->srr1 = vcpu->arch.shregs.msr; |
| 550 | ++dt; |
| 551 | if (dt == vcpu->arch.dtl.pinned_end) |
| 552 | dt = vcpu->arch.dtl.pinned_addr; |
| 553 | vcpu->arch.dtl_ptr = dt; |
| 554 | /* order writing *dt vs. writing vpa->dtl_idx */ |
| 555 | smp_wmb(); |
| 556 | vpa->dtl_idx = ++vcpu->arch.dtl_index; |
Paul Mackerras | c35635e | 2013-04-18 19:51:04 +0000 | [diff] [blame] | 557 | vcpu->arch.dtl.dirty = true; |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 558 | } |
| 559 | |
Michael Neuling | 9642382 | 2014-06-02 11:03:01 +1000 | [diff] [blame^] | 560 | static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu) |
| 561 | { |
| 562 | if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) |
| 563 | return true; |
| 564 | if ((!vcpu->arch.vcore->arch_compat) && |
| 565 | cpu_has_feature(CPU_FTR_ARCH_207S)) |
| 566 | return true; |
| 567 | return false; |
| 568 | } |
| 569 | |
| 570 | static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, |
| 571 | unsigned long resource, unsigned long value1, |
| 572 | unsigned long value2) |
| 573 | { |
| 574 | switch (resource) { |
| 575 | case H_SET_MODE_RESOURCE_SET_CIABR: |
| 576 | if (!kvmppc_power8_compatible(vcpu)) |
| 577 | return H_P2; |
| 578 | if (value2) |
| 579 | return H_P4; |
| 580 | if (mflags) |
| 581 | return H_UNSUPPORTED_FLAG_START; |
| 582 | /* Guests can't breakpoint the hypervisor */ |
| 583 | if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) |
| 584 | return H_P3; |
| 585 | vcpu->arch.ciabr = value1; |
| 586 | return H_SUCCESS; |
| 587 | case H_SET_MODE_RESOURCE_SET_DAWR: |
| 588 | if (!kvmppc_power8_compatible(vcpu)) |
| 589 | return H_P2; |
| 590 | if (mflags) |
| 591 | return H_UNSUPPORTED_FLAG_START; |
| 592 | if (value2 & DABRX_HYP) |
| 593 | return H_P4; |
| 594 | vcpu->arch.dawr = value1; |
| 595 | vcpu->arch.dawrx = value2; |
| 596 | return H_SUCCESS; |
| 597 | default: |
| 598 | return H_TOO_HARD; |
| 599 | } |
| 600 | } |
| 601 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 602 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) |
| 603 | { |
| 604 | unsigned long req = kvmppc_get_gpr(vcpu, 3); |
| 605 | unsigned long target, ret = H_SUCCESS; |
| 606 | struct kvm_vcpu *tvcpu; |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 607 | int idx, rc; |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 608 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 609 | if (req <= MAX_HCALL_OPCODE && |
| 610 | !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) |
| 611 | return RESUME_HOST; |
| 612 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 613 | switch (req) { |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 614 | case H_ENTER: |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 615 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 616 | ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4), |
| 617 | kvmppc_get_gpr(vcpu, 5), |
| 618 | kvmppc_get_gpr(vcpu, 6), |
| 619 | kvmppc_get_gpr(vcpu, 7)); |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 620 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 621 | break; |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 622 | case H_CEDE: |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 623 | break; |
| 624 | case H_PROD: |
| 625 | target = kvmppc_get_gpr(vcpu, 4); |
| 626 | tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); |
| 627 | if (!tvcpu) { |
| 628 | ret = H_PARAMETER; |
| 629 | break; |
| 630 | } |
| 631 | tvcpu->arch.prodded = 1; |
| 632 | smp_mb(); |
| 633 | if (vcpu->arch.ceded) { |
| 634 | if (waitqueue_active(&vcpu->wq)) { |
| 635 | wake_up_interruptible(&vcpu->wq); |
| 636 | vcpu->stat.halt_wakeup++; |
| 637 | } |
| 638 | } |
| 639 | break; |
| 640 | case H_CONFER: |
Paul Mackerras | 42d7604 | 2013-09-06 13:23:21 +1000 | [diff] [blame] | 641 | target = kvmppc_get_gpr(vcpu, 4); |
| 642 | if (target == -1) |
| 643 | break; |
| 644 | tvcpu = kvmppc_find_vcpu(vcpu->kvm, target); |
| 645 | if (!tvcpu) { |
| 646 | ret = H_PARAMETER; |
| 647 | break; |
| 648 | } |
| 649 | kvm_vcpu_yield_to(tvcpu); |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 650 | break; |
| 651 | case H_REGISTER_VPA: |
| 652 | ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4), |
| 653 | kvmppc_get_gpr(vcpu, 5), |
| 654 | kvmppc_get_gpr(vcpu, 6)); |
| 655 | break; |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 656 | case H_RTAS: |
| 657 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) |
| 658 | return RESUME_HOST; |
| 659 | |
Paul Mackerras | c943809 | 2013-11-16 17:46:05 +1100 | [diff] [blame] | 660 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 661 | rc = kvmppc_rtas_hcall(vcpu); |
Paul Mackerras | c943809 | 2013-11-16 17:46:05 +1100 | [diff] [blame] | 662 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
Michael Ellerman | 8e591cb | 2013-04-17 20:30:00 +0000 | [diff] [blame] | 663 | |
| 664 | if (rc == -ENOENT) |
| 665 | return RESUME_HOST; |
| 666 | else if (rc == 0) |
| 667 | break; |
| 668 | |
| 669 | /* Send the error out to userspace via KVM_RUN */ |
| 670 | return rc; |
Michael Neuling | 9642382 | 2014-06-02 11:03:01 +1000 | [diff] [blame^] | 671 | case H_SET_MODE: |
| 672 | ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4), |
| 673 | kvmppc_get_gpr(vcpu, 5), |
| 674 | kvmppc_get_gpr(vcpu, 6), |
| 675 | kvmppc_get_gpr(vcpu, 7)); |
| 676 | if (ret == H_TOO_HARD) |
| 677 | return RESUME_HOST; |
| 678 | break; |
Benjamin Herrenschmidt | bc5ad3f | 2013-04-17 20:30:26 +0000 | [diff] [blame] | 679 | case H_XIRR: |
| 680 | case H_CPPR: |
| 681 | case H_EOI: |
| 682 | case H_IPI: |
Paul Mackerras | 8e44ddc | 2013-05-23 15:42:21 +0000 | [diff] [blame] | 683 | case H_IPOLL: |
| 684 | case H_XIRR_X: |
Benjamin Herrenschmidt | bc5ad3f | 2013-04-17 20:30:26 +0000 | [diff] [blame] | 685 | if (kvmppc_xics_enabled(vcpu)) { |
| 686 | ret = kvmppc_xics_hcall(vcpu, req); |
| 687 | break; |
| 688 | } /* fallthrough */ |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 689 | default: |
| 690 | return RESUME_HOST; |
| 691 | } |
| 692 | kvmppc_set_gpr(vcpu, 3, ret); |
| 693 | vcpu->arch.hcall_needed = 0; |
| 694 | return RESUME_GUEST; |
| 695 | } |
| 696 | |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 697 | static int kvmppc_hcall_impl_hv(unsigned long cmd) |
| 698 | { |
| 699 | switch (cmd) { |
| 700 | case H_CEDE: |
| 701 | case H_PROD: |
| 702 | case H_CONFER: |
| 703 | case H_REGISTER_VPA: |
Michael Neuling | 9642382 | 2014-06-02 11:03:01 +1000 | [diff] [blame^] | 704 | case H_SET_MODE: |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 705 | #ifdef CONFIG_KVM_XICS |
| 706 | case H_XIRR: |
| 707 | case H_CPPR: |
| 708 | case H_EOI: |
| 709 | case H_IPI: |
| 710 | case H_IPOLL: |
| 711 | case H_XIRR_X: |
| 712 | #endif |
| 713 | return 1; |
| 714 | } |
| 715 | |
| 716 | /* See if it's in the real-mode table */ |
| 717 | return kvmppc_hcall_impl_hv_realmode(cmd); |
| 718 | } |
| 719 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 720 | static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 721 | struct task_struct *tsk) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 722 | { |
| 723 | int r = RESUME_HOST; |
| 724 | |
| 725 | vcpu->stat.sum_exits++; |
| 726 | |
| 727 | run->exit_reason = KVM_EXIT_UNKNOWN; |
| 728 | run->ready_for_interrupt_injection = 1; |
| 729 | switch (vcpu->arch.trap) { |
| 730 | /* We're good on these - the host merely wanted to get our attention */ |
| 731 | case BOOK3S_INTERRUPT_HV_DECREMENTER: |
| 732 | vcpu->stat.dec_exits++; |
| 733 | r = RESUME_GUEST; |
| 734 | break; |
| 735 | case BOOK3S_INTERRUPT_EXTERNAL: |
Paul Mackerras | 5d00f66 | 2014-01-08 21:25:28 +1100 | [diff] [blame] | 736 | case BOOK3S_INTERRUPT_H_DOORBELL: |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 737 | vcpu->stat.ext_intr_exits++; |
| 738 | r = RESUME_GUEST; |
| 739 | break; |
| 740 | case BOOK3S_INTERRUPT_PERFMON: |
| 741 | r = RESUME_GUEST; |
| 742 | break; |
Paul Mackerras | b4072df | 2012-11-23 22:37:50 +0000 | [diff] [blame] | 743 | case BOOK3S_INTERRUPT_MACHINE_CHECK: |
| 744 | /* |
| 745 | * Deliver a machine check interrupt to the guest. |
| 746 | * We have to do this, even if the host has handled the |
| 747 | * machine check, because machine checks use SRR0/1 and |
| 748 | * the interrupt might have trashed guest state in them. |
| 749 | */ |
| 750 | kvmppc_book3s_queue_irqprio(vcpu, |
| 751 | BOOK3S_INTERRUPT_MACHINE_CHECK); |
| 752 | r = RESUME_GUEST; |
| 753 | break; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 754 | case BOOK3S_INTERRUPT_PROGRAM: |
| 755 | { |
| 756 | ulong flags; |
| 757 | /* |
| 758 | * Normally program interrupts are delivered directly |
| 759 | * to the guest by the hardware, but we can get here |
| 760 | * as a result of a hypervisor emulation interrupt |
| 761 | * (e40) getting turned into a 700 by BML RTAS. |
| 762 | */ |
| 763 | flags = vcpu->arch.shregs.msr & 0x1f0000ull; |
| 764 | kvmppc_core_queue_program(vcpu, flags); |
| 765 | r = RESUME_GUEST; |
| 766 | break; |
| 767 | } |
| 768 | case BOOK3S_INTERRUPT_SYSCALL: |
| 769 | { |
| 770 | /* hcall - punt to userspace */ |
| 771 | int i; |
| 772 | |
Liu Ping Fan | 27025a6 | 2013-11-19 14:12:48 +0800 | [diff] [blame] | 773 | /* hypercall with MSR_PR has already been handled in rmode, |
| 774 | * and never reaches here. |
| 775 | */ |
| 776 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 777 | run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); |
| 778 | for (i = 0; i < 9; ++i) |
| 779 | run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); |
| 780 | run->exit_reason = KVM_EXIT_PAPR_HCALL; |
| 781 | vcpu->arch.hcall_needed = 1; |
| 782 | r = RESUME_HOST; |
| 783 | break; |
| 784 | } |
| 785 | /* |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 786 | * We get these next two if the guest accesses a page which it thinks |
| 787 | * it has mapped but which is not actually present, either because |
| 788 | * it is for an emulated I/O device or because the corresonding |
| 789 | * host page has been paged out. Any other HDSI/HISI interrupts |
| 790 | * have been handled already. |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 791 | */ |
| 792 | case BOOK3S_INTERRUPT_H_DATA_STORAGE: |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 793 | r = RESUME_PAGE_FAULT; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 794 | break; |
| 795 | case BOOK3S_INTERRUPT_H_INST_STORAGE: |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 796 | vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); |
| 797 | vcpu->arch.fault_dsisr = 0; |
| 798 | r = RESUME_PAGE_FAULT; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 799 | break; |
| 800 | /* |
| 801 | * This occurs if the guest executes an illegal instruction. |
| 802 | * We just generate a program interrupt to the guest, since |
| 803 | * we don't emulate any guest instructions at this stage. |
| 804 | */ |
| 805 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: |
Michael Ellerman | bd3048b | 2014-01-08 21:25:23 +1100 | [diff] [blame] | 806 | kvmppc_core_queue_program(vcpu, SRR1_PROGILL); |
| 807 | r = RESUME_GUEST; |
| 808 | break; |
| 809 | /* |
| 810 | * This occurs if the guest (kernel or userspace), does something that |
| 811 | * is prohibited by HFSCR. We just generate a program interrupt to |
| 812 | * the guest. |
| 813 | */ |
| 814 | case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: |
| 815 | kvmppc_core_queue_program(vcpu, SRR1_PROGILL); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 816 | r = RESUME_GUEST; |
| 817 | break; |
| 818 | default: |
| 819 | kvmppc_dump_regs(vcpu); |
| 820 | printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", |
| 821 | vcpu->arch.trap, kvmppc_get_pc(vcpu), |
| 822 | vcpu->arch.shregs.msr); |
Paul Mackerras | f3271d4 | 2013-09-20 14:52:41 +1000 | [diff] [blame] | 823 | run->hw.hardware_exit_reason = vcpu->arch.trap; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 824 | r = RESUME_HOST; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 825 | break; |
| 826 | } |
| 827 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 828 | return r; |
| 829 | } |
| 830 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 831 | static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, |
| 832 | struct kvm_sregs *sregs) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 833 | { |
| 834 | int i; |
| 835 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 836 | memset(sregs, 0, sizeof(struct kvm_sregs)); |
Aneesh Kumar K.V | 8791644 | 2013-08-22 17:08:39 +0530 | [diff] [blame] | 837 | sregs->pvr = vcpu->arch.pvr; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 838 | for (i = 0; i < vcpu->arch.slb_max; i++) { |
| 839 | sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; |
| 840 | sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; |
| 841 | } |
| 842 | |
| 843 | return 0; |
| 844 | } |
| 845 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 846 | static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, |
| 847 | struct kvm_sregs *sregs) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 848 | { |
| 849 | int i, j; |
| 850 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 851 | kvmppc_set_pvr_hv(vcpu, sregs->pvr); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 852 | |
| 853 | j = 0; |
| 854 | for (i = 0; i < vcpu->arch.slb_nr; i++) { |
| 855 | if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { |
| 856 | vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; |
| 857 | vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; |
| 858 | ++j; |
| 859 | } |
| 860 | } |
| 861 | vcpu->arch.slb_max = j; |
| 862 | |
| 863 | return 0; |
| 864 | } |
| 865 | |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 866 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
| 867 | { |
| 868 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
| 869 | u64 mask; |
| 870 | |
| 871 | spin_lock(&vc->lock); |
| 872 | /* |
Anton Blanchard | d682916 | 2014-01-08 21:25:30 +1100 | [diff] [blame] | 873 | * If ILE (interrupt little-endian) has changed, update the |
| 874 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. |
| 875 | */ |
| 876 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { |
| 877 | struct kvm *kvm = vcpu->kvm; |
| 878 | struct kvm_vcpu *vcpu; |
| 879 | int i; |
| 880 | |
| 881 | mutex_lock(&kvm->lock); |
| 882 | kvm_for_each_vcpu(i, vcpu, kvm) { |
| 883 | if (vcpu->arch.vcore != vc) |
| 884 | continue; |
| 885 | if (new_lpcr & LPCR_ILE) |
| 886 | vcpu->arch.intr_msr |= MSR_LE; |
| 887 | else |
| 888 | vcpu->arch.intr_msr &= ~MSR_LE; |
| 889 | } |
| 890 | mutex_unlock(&kvm->lock); |
| 891 | } |
| 892 | |
| 893 | /* |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 894 | * Userspace can only modify DPFD (default prefetch depth), |
| 895 | * ILE (interrupt little-endian) and TC (translation control). |
Paul Mackerras | e0622bd | 2014-01-08 21:25:27 +1100 | [diff] [blame] | 896 | * On POWER8 userspace can also modify AIL (alt. interrupt loc.) |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 897 | */ |
| 898 | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; |
Paul Mackerras | e0622bd | 2014-01-08 21:25:27 +1100 | [diff] [blame] | 899 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
| 900 | mask |= LPCR_AIL; |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 901 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
| 902 | spin_unlock(&vc->lock); |
| 903 | } |
| 904 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 905 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
| 906 | union kvmppc_one_reg *val) |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 907 | { |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 908 | int r = 0; |
| 909 | long int i; |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 910 | |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 911 | switch (id) { |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 912 | case KVM_REG_PPC_HIOR: |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 913 | *val = get_reg_val(id, 0); |
| 914 | break; |
| 915 | case KVM_REG_PPC_DABR: |
| 916 | *val = get_reg_val(id, vcpu->arch.dabr); |
| 917 | break; |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 918 | case KVM_REG_PPC_DABRX: |
| 919 | *val = get_reg_val(id, vcpu->arch.dabrx); |
| 920 | break; |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 921 | case KVM_REG_PPC_DSCR: |
| 922 | *val = get_reg_val(id, vcpu->arch.dscr); |
| 923 | break; |
| 924 | case KVM_REG_PPC_PURR: |
| 925 | *val = get_reg_val(id, vcpu->arch.purr); |
| 926 | break; |
| 927 | case KVM_REG_PPC_SPURR: |
| 928 | *val = get_reg_val(id, vcpu->arch.spurr); |
| 929 | break; |
| 930 | case KVM_REG_PPC_AMR: |
| 931 | *val = get_reg_val(id, vcpu->arch.amr); |
| 932 | break; |
| 933 | case KVM_REG_PPC_UAMOR: |
| 934 | *val = get_reg_val(id, vcpu->arch.uamor); |
| 935 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 936 | case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 937 | i = id - KVM_REG_PPC_MMCR0; |
| 938 | *val = get_reg_val(id, vcpu->arch.mmcr[i]); |
| 939 | break; |
| 940 | case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: |
| 941 | i = id - KVM_REG_PPC_PMC1; |
| 942 | *val = get_reg_val(id, vcpu->arch.pmc[i]); |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 943 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 944 | case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: |
| 945 | i = id - KVM_REG_PPC_SPMC1; |
| 946 | *val = get_reg_val(id, vcpu->arch.spmc[i]); |
| 947 | break; |
Paul Mackerras | 1494178 | 2013-09-06 13:11:18 +1000 | [diff] [blame] | 948 | case KVM_REG_PPC_SIAR: |
| 949 | *val = get_reg_val(id, vcpu->arch.siar); |
| 950 | break; |
| 951 | case KVM_REG_PPC_SDAR: |
| 952 | *val = get_reg_val(id, vcpu->arch.sdar); |
| 953 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 954 | case KVM_REG_PPC_SIER: |
| 955 | *val = get_reg_val(id, vcpu->arch.sier); |
Paul Mackerras | a8bd19e | 2012-09-25 20:32:30 +0000 | [diff] [blame] | 956 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 957 | case KVM_REG_PPC_IAMR: |
| 958 | *val = get_reg_val(id, vcpu->arch.iamr); |
Paul Mackerras | a8bd19e | 2012-09-25 20:32:30 +0000 | [diff] [blame] | 959 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 960 | case KVM_REG_PPC_PSPB: |
| 961 | *val = get_reg_val(id, vcpu->arch.pspb); |
| 962 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 963 | case KVM_REG_PPC_DPDES: |
| 964 | *val = get_reg_val(id, vcpu->arch.vcore->dpdes); |
| 965 | break; |
| 966 | case KVM_REG_PPC_DAWR: |
| 967 | *val = get_reg_val(id, vcpu->arch.dawr); |
| 968 | break; |
| 969 | case KVM_REG_PPC_DAWRX: |
| 970 | *val = get_reg_val(id, vcpu->arch.dawrx); |
| 971 | break; |
| 972 | case KVM_REG_PPC_CIABR: |
| 973 | *val = get_reg_val(id, vcpu->arch.ciabr); |
| 974 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 975 | case KVM_REG_PPC_CSIGR: |
| 976 | *val = get_reg_val(id, vcpu->arch.csigr); |
| 977 | break; |
| 978 | case KVM_REG_PPC_TACR: |
| 979 | *val = get_reg_val(id, vcpu->arch.tacr); |
| 980 | break; |
| 981 | case KVM_REG_PPC_TCSCR: |
| 982 | *val = get_reg_val(id, vcpu->arch.tcscr); |
| 983 | break; |
| 984 | case KVM_REG_PPC_PID: |
| 985 | *val = get_reg_val(id, vcpu->arch.pid); |
| 986 | break; |
| 987 | case KVM_REG_PPC_ACOP: |
| 988 | *val = get_reg_val(id, vcpu->arch.acop); |
| 989 | break; |
| 990 | case KVM_REG_PPC_WORT: |
| 991 | *val = get_reg_val(id, vcpu->arch.wort); |
| 992 | break; |
Paul Mackerras | 55b665b | 2012-09-25 20:33:06 +0000 | [diff] [blame] | 993 | case KVM_REG_PPC_VPA_ADDR: |
| 994 | spin_lock(&vcpu->arch.vpa_update_lock); |
| 995 | *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); |
| 996 | spin_unlock(&vcpu->arch.vpa_update_lock); |
| 997 | break; |
| 998 | case KVM_REG_PPC_VPA_SLB: |
| 999 | spin_lock(&vcpu->arch.vpa_update_lock); |
| 1000 | val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; |
| 1001 | val->vpaval.length = vcpu->arch.slb_shadow.len; |
| 1002 | spin_unlock(&vcpu->arch.vpa_update_lock); |
| 1003 | break; |
| 1004 | case KVM_REG_PPC_VPA_DTL: |
| 1005 | spin_lock(&vcpu->arch.vpa_update_lock); |
| 1006 | val->vpaval.addr = vcpu->arch.dtl.next_gpa; |
| 1007 | val->vpaval.length = vcpu->arch.dtl.len; |
| 1008 | spin_unlock(&vcpu->arch.vpa_update_lock); |
| 1009 | break; |
Paul Mackerras | 93b0f4d | 2013-09-06 13:17:46 +1000 | [diff] [blame] | 1010 | case KVM_REG_PPC_TB_OFFSET: |
| 1011 | *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); |
| 1012 | break; |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 1013 | case KVM_REG_PPC_LPCR: |
| 1014 | *val = get_reg_val(id, vcpu->arch.vcore->lpcr); |
| 1015 | break; |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 1016 | case KVM_REG_PPC_PPR: |
| 1017 | *val = get_reg_val(id, vcpu->arch.ppr); |
| 1018 | break; |
Michael Neuling | a7d80d0 | 2014-03-25 10:47:03 +1100 | [diff] [blame] | 1019 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 1020 | case KVM_REG_PPC_TFHAR: |
| 1021 | *val = get_reg_val(id, vcpu->arch.tfhar); |
| 1022 | break; |
| 1023 | case KVM_REG_PPC_TFIAR: |
| 1024 | *val = get_reg_val(id, vcpu->arch.tfiar); |
| 1025 | break; |
| 1026 | case KVM_REG_PPC_TEXASR: |
| 1027 | *val = get_reg_val(id, vcpu->arch.texasr); |
| 1028 | break; |
| 1029 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: |
| 1030 | i = id - KVM_REG_PPC_TM_GPR0; |
| 1031 | *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); |
| 1032 | break; |
| 1033 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: |
| 1034 | { |
| 1035 | int j; |
| 1036 | i = id - KVM_REG_PPC_TM_VSR0; |
| 1037 | if (i < 32) |
| 1038 | for (j = 0; j < TS_FPRWIDTH; j++) |
| 1039 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; |
| 1040 | else { |
| 1041 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
| 1042 | val->vval = vcpu->arch.vr_tm.vr[i-32]; |
| 1043 | else |
| 1044 | r = -ENXIO; |
| 1045 | } |
| 1046 | break; |
| 1047 | } |
| 1048 | case KVM_REG_PPC_TM_CR: |
| 1049 | *val = get_reg_val(id, vcpu->arch.cr_tm); |
| 1050 | break; |
| 1051 | case KVM_REG_PPC_TM_LR: |
| 1052 | *val = get_reg_val(id, vcpu->arch.lr_tm); |
| 1053 | break; |
| 1054 | case KVM_REG_PPC_TM_CTR: |
| 1055 | *val = get_reg_val(id, vcpu->arch.ctr_tm); |
| 1056 | break; |
| 1057 | case KVM_REG_PPC_TM_FPSCR: |
| 1058 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); |
| 1059 | break; |
| 1060 | case KVM_REG_PPC_TM_AMR: |
| 1061 | *val = get_reg_val(id, vcpu->arch.amr_tm); |
| 1062 | break; |
| 1063 | case KVM_REG_PPC_TM_PPR: |
| 1064 | *val = get_reg_val(id, vcpu->arch.ppr_tm); |
| 1065 | break; |
| 1066 | case KVM_REG_PPC_TM_VRSAVE: |
| 1067 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); |
| 1068 | break; |
| 1069 | case KVM_REG_PPC_TM_VSCR: |
| 1070 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
| 1071 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); |
| 1072 | else |
| 1073 | r = -ENXIO; |
| 1074 | break; |
| 1075 | case KVM_REG_PPC_TM_DSCR: |
| 1076 | *val = get_reg_val(id, vcpu->arch.dscr_tm); |
| 1077 | break; |
| 1078 | case KVM_REG_PPC_TM_TAR: |
| 1079 | *val = get_reg_val(id, vcpu->arch.tar_tm); |
| 1080 | break; |
| 1081 | #endif |
Paul Mackerras | 388cc6e | 2013-09-21 14:35:02 +1000 | [diff] [blame] | 1082 | case KVM_REG_PPC_ARCH_COMPAT: |
| 1083 | *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); |
| 1084 | break; |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1085 | default: |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1086 | r = -EINVAL; |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1087 | break; |
| 1088 | } |
| 1089 | |
| 1090 | return r; |
| 1091 | } |
| 1092 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1093 | static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
| 1094 | union kvmppc_one_reg *val) |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1095 | { |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1096 | int r = 0; |
| 1097 | long int i; |
Paul Mackerras | 55b665b | 2012-09-25 20:33:06 +0000 | [diff] [blame] | 1098 | unsigned long addr, len; |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1099 | |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1100 | switch (id) { |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1101 | case KVM_REG_PPC_HIOR: |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1102 | /* Only allow this to be set to zero */ |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1103 | if (set_reg_val(id, *val)) |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1104 | r = -EINVAL; |
| 1105 | break; |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1106 | case KVM_REG_PPC_DABR: |
| 1107 | vcpu->arch.dabr = set_reg_val(id, *val); |
| 1108 | break; |
Paul Mackerras | 8563bf5 | 2014-01-08 21:25:29 +1100 | [diff] [blame] | 1109 | case KVM_REG_PPC_DABRX: |
| 1110 | vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; |
| 1111 | break; |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1112 | case KVM_REG_PPC_DSCR: |
| 1113 | vcpu->arch.dscr = set_reg_val(id, *val); |
| 1114 | break; |
| 1115 | case KVM_REG_PPC_PURR: |
| 1116 | vcpu->arch.purr = set_reg_val(id, *val); |
| 1117 | break; |
| 1118 | case KVM_REG_PPC_SPURR: |
| 1119 | vcpu->arch.spurr = set_reg_val(id, *val); |
| 1120 | break; |
| 1121 | case KVM_REG_PPC_AMR: |
| 1122 | vcpu->arch.amr = set_reg_val(id, *val); |
| 1123 | break; |
| 1124 | case KVM_REG_PPC_UAMOR: |
| 1125 | vcpu->arch.uamor = set_reg_val(id, *val); |
| 1126 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1127 | case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1128 | i = id - KVM_REG_PPC_MMCR0; |
| 1129 | vcpu->arch.mmcr[i] = set_reg_val(id, *val); |
| 1130 | break; |
| 1131 | case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: |
| 1132 | i = id - KVM_REG_PPC_PMC1; |
| 1133 | vcpu->arch.pmc[i] = set_reg_val(id, *val); |
| 1134 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1135 | case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: |
| 1136 | i = id - KVM_REG_PPC_SPMC1; |
| 1137 | vcpu->arch.spmc[i] = set_reg_val(id, *val); |
| 1138 | break; |
Paul Mackerras | 1494178 | 2013-09-06 13:11:18 +1000 | [diff] [blame] | 1139 | case KVM_REG_PPC_SIAR: |
| 1140 | vcpu->arch.siar = set_reg_val(id, *val); |
| 1141 | break; |
| 1142 | case KVM_REG_PPC_SDAR: |
| 1143 | vcpu->arch.sdar = set_reg_val(id, *val); |
| 1144 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1145 | case KVM_REG_PPC_SIER: |
| 1146 | vcpu->arch.sier = set_reg_val(id, *val); |
Paul Mackerras | a8bd19e | 2012-09-25 20:32:30 +0000 | [diff] [blame] | 1147 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1148 | case KVM_REG_PPC_IAMR: |
| 1149 | vcpu->arch.iamr = set_reg_val(id, *val); |
Paul Mackerras | a8bd19e | 2012-09-25 20:32:30 +0000 | [diff] [blame] | 1150 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1151 | case KVM_REG_PPC_PSPB: |
| 1152 | vcpu->arch.pspb = set_reg_val(id, *val); |
| 1153 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1154 | case KVM_REG_PPC_DPDES: |
| 1155 | vcpu->arch.vcore->dpdes = set_reg_val(id, *val); |
| 1156 | break; |
| 1157 | case KVM_REG_PPC_DAWR: |
| 1158 | vcpu->arch.dawr = set_reg_val(id, *val); |
| 1159 | break; |
| 1160 | case KVM_REG_PPC_DAWRX: |
| 1161 | vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; |
| 1162 | break; |
| 1163 | case KVM_REG_PPC_CIABR: |
| 1164 | vcpu->arch.ciabr = set_reg_val(id, *val); |
| 1165 | /* Don't allow setting breakpoints in hypervisor code */ |
| 1166 | if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) |
| 1167 | vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ |
| 1168 | break; |
Michael Neuling | b005255e | 2014-01-08 21:25:21 +1100 | [diff] [blame] | 1169 | case KVM_REG_PPC_CSIGR: |
| 1170 | vcpu->arch.csigr = set_reg_val(id, *val); |
| 1171 | break; |
| 1172 | case KVM_REG_PPC_TACR: |
| 1173 | vcpu->arch.tacr = set_reg_val(id, *val); |
| 1174 | break; |
| 1175 | case KVM_REG_PPC_TCSCR: |
| 1176 | vcpu->arch.tcscr = set_reg_val(id, *val); |
| 1177 | break; |
| 1178 | case KVM_REG_PPC_PID: |
| 1179 | vcpu->arch.pid = set_reg_val(id, *val); |
| 1180 | break; |
| 1181 | case KVM_REG_PPC_ACOP: |
| 1182 | vcpu->arch.acop = set_reg_val(id, *val); |
| 1183 | break; |
| 1184 | case KVM_REG_PPC_WORT: |
| 1185 | vcpu->arch.wort = set_reg_val(id, *val); |
| 1186 | break; |
Paul Mackerras | 55b665b | 2012-09-25 20:33:06 +0000 | [diff] [blame] | 1187 | case KVM_REG_PPC_VPA_ADDR: |
| 1188 | addr = set_reg_val(id, *val); |
| 1189 | r = -EINVAL; |
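| | /* don't allow unregistering the VPA while the SLB shadow or DTL is registered */ |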
| 1190 | if (!addr && (vcpu->arch.slb_shadow.next_gpa || |
| 1191 | vcpu->arch.dtl.next_gpa)) |
| 1192 | break; |
| 1193 | r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); |
| 1194 | break; |
| 1195 | case KVM_REG_PPC_VPA_SLB: |
| 1196 | addr = val->vpaval.addr; |
| 1197 | len = val->vpaval.length; |
| 1198 | r = -EINVAL; |
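| | /* the VPA must be registered before the SLB shadow buffer */ |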
| 1199 | if (addr && !vcpu->arch.vpa.next_gpa) |
| 1200 | break; |
| 1201 | r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); |
| 1202 | break; |
| 1203 | case KVM_REG_PPC_VPA_DTL: |
| 1204 | addr = val->vpaval.addr; |
| 1205 | len = val->vpaval.length; |
| 1206 | r = -EINVAL; |
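| | /* the DTL must hold at least one entry and requires the VPA to be registered */ |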
Paul Mackerras | 9f8c8c7 | 2012-10-15 01:18:37 +0000 | [diff] [blame] | 1207 | if (addr && (len < sizeof(struct dtl_entry) || |
| 1208 | !vcpu->arch.vpa.next_gpa)) |
Paul Mackerras | 55b665b | 2012-09-25 20:33:06 +0000 | [diff] [blame] | 1209 | break; |
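| | /* truncate to a whole number of DTL entries */ |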
| 1210 | len -= len % sizeof(struct dtl_entry); |
| 1211 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); |
| 1212 | break; |
Paul Mackerras | 93b0f4d | 2013-09-06 13:17:46 +1000 | [diff] [blame] | 1213 | case KVM_REG_PPC_TB_OFFSET: |
| 1214 | /* round up to multiple of 2^24 */ |
| 1215 | vcpu->arch.vcore->tb_offset = |
| 1216 | ALIGN(set_reg_val(id, *val), 1UL << 24); |
| 1217 | break; |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 1218 | case KVM_REG_PPC_LPCR: |
| 1219 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); |
| 1220 | break; |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 1221 | case KVM_REG_PPC_PPR: |
| 1222 | vcpu->arch.ppr = set_reg_val(id, *val); |
| 1223 | break; |
Michael Neuling | a7d80d0 | 2014-03-25 10:47:03 +1100 | [diff] [blame] | 1224 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
| 1225 | case KVM_REG_PPC_TFHAR: |
| 1226 | vcpu->arch.tfhar = set_reg_val(id, *val); |
| 1227 | break; |
| 1228 | case KVM_REG_PPC_TFIAR: |
| 1229 | vcpu->arch.tfiar = set_reg_val(id, *val); |
| 1230 | break; |
| 1231 | case KVM_REG_PPC_TEXASR: |
| 1232 | vcpu->arch.texasr = set_reg_val(id, *val); |
| 1233 | break; |
| 1234 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: |
| 1235 | i = id - KVM_REG_PPC_TM_GPR0; |
| 1236 | vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); |
| 1237 | break; |
| 1238 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: |
| 1239 | { |
| 1240 | int j; |
| 1241 | i = id - KVM_REG_PPC_TM_VSR0; |
| 1242 | if (i < 32) |
| 1243 | for (j = 0; j < TS_FPRWIDTH; j++) |
| 1244 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; |
| 1245 | else |
| 1246 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
| 1247 | vcpu->arch.vr_tm.vr[i-32] = val->vval; |
| 1248 | else |
| 1249 | r = -ENXIO; |
| 1250 | break; |
| 1251 | } |
| 1252 | case KVM_REG_PPC_TM_CR: |
| 1253 | vcpu->arch.cr_tm = set_reg_val(id, *val); |
| 1254 | break; |
| 1255 | case KVM_REG_PPC_TM_LR: |
| 1256 | vcpu->arch.lr_tm = set_reg_val(id, *val); |
| 1257 | break; |
| 1258 | case KVM_REG_PPC_TM_CTR: |
| 1259 | vcpu->arch.ctr_tm = set_reg_val(id, *val); |
| 1260 | break; |
| 1261 | case KVM_REG_PPC_TM_FPSCR: |
| 1262 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); |
| 1263 | break; |
| 1264 | case KVM_REG_PPC_TM_AMR: |
| 1265 | vcpu->arch.amr_tm = set_reg_val(id, *val); |
| 1266 | break; |
| 1267 | case KVM_REG_PPC_TM_PPR: |
| 1268 | vcpu->arch.ppr_tm = set_reg_val(id, *val); |
| 1269 | break; |
| 1270 | case KVM_REG_PPC_TM_VRSAVE: |
| 1271 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); |
| 1272 | break; |
| 1273 | case KVM_REG_PPC_TM_VSCR: |
| 1274 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
| 1275 | vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val); |
| 1276 | else |
| 1277 | r = -ENXIO; |
| 1278 | break; |
| 1279 | case KVM_REG_PPC_TM_DSCR: |
| 1280 | vcpu->arch.dscr_tm = set_reg_val(id, *val); |
| 1281 | break; |
| 1282 | case KVM_REG_PPC_TM_TAR: |
| 1283 | vcpu->arch.tar_tm = set_reg_val(id, *val); |
| 1284 | break; |
| 1285 | #endif |
Paul Mackerras | 388cc6e | 2013-09-21 14:35:02 +1000 | [diff] [blame] | 1286 | case KVM_REG_PPC_ARCH_COMPAT: |
| 1287 | r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); |
| 1288 | break; |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1289 | default: |
Paul Mackerras | a136a8b | 2012-09-25 20:31:56 +0000 | [diff] [blame] | 1290 | r = -EINVAL; |
Paul Mackerras | 31f3438 | 2011-12-12 12:26:50 +0000 | [diff] [blame] | 1291 | break; |
| 1292 | } |
| 1293 | |
| 1294 | return r; |
| 1295 | } |
| 1296 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1297 | static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
| 1298 | unsigned int id) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1299 | { |
| 1300 | struct kvm_vcpu *vcpu; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1301 | int err = -EINVAL; |
| 1302 | int core; |
| 1303 | struct kvmppc_vcore *vcore; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1304 | |
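| | /* each virtual core covers one subcore's worth of vcpu ids */ |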
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1305 | core = id / threads_per_subcore; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1306 | if (core >= KVM_MAX_VCORES) |
| 1307 | goto out; |
| 1308 | |
| 1309 | err = -ENOMEM; |
Sasha Levin | 6b75e6b | 2011-12-07 10:24:56 +0200 | [diff] [blame] | 1310 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1311 | if (!vcpu) |
| 1312 | goto out; |
| 1313 | |
| 1314 | err = kvm_vcpu_init(vcpu, kvm, id); |
| 1315 | if (err) |
| 1316 | goto free_vcpu; |
| 1317 | |
| 1318 | vcpu->arch.shared = &vcpu->arch.shregs; |
Alexander Graf | 5deb8e7 | 2014-04-24 13:46:24 +0200 | [diff] [blame] | 1319 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
| 1320 | /* |
| 1321 | * The shared struct is never shared on HV, |
| 1322 | * so we can always use host endianness |
| 1323 | */ |
| 1324 | #ifdef __BIG_ENDIAN__ |
| 1325 | vcpu->arch.shared_big_endian = true; |
| 1326 | #else |
| 1327 | vcpu->arch.shared_big_endian = false; |
| 1328 | #endif |
| 1329 | #endif |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1330 | vcpu->arch.mmcr[0] = MMCR0_FC; |
| 1331 | vcpu->arch.ctrl = CTRL_RUNLATCH; |
| 1332 | /* default to host PVR, since we can't spoof it */ |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1333 | kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); |
Paul Mackerras | 2e25aa5 | 2012-02-19 17:46:32 +0000 | [diff] [blame] | 1334 | spin_lock_init(&vcpu->arch.vpa_update_lock); |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1335 | spin_lock_init(&vcpu->arch.tbacct_lock); |
| 1336 | vcpu->arch.busy_preempt = TB_NIL; |
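| | /* default MSR for guest interrupt delivery: 64-bit mode, machine checks enabled */ |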
Anton Blanchard | d682916 | 2014-01-08 21:25:30 +1100 | [diff] [blame] | 1337 | vcpu->arch.intr_msr = MSR_SF | MSR_ME; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1338 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1339 | kvmppc_mmu_book3s_hv_init(vcpu); |
| 1340 | |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1341 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1342 | |
| 1343 | init_waitqueue_head(&vcpu->arch.cpu_run); |
| 1344 | |
| 1345 | mutex_lock(&kvm->lock); |
| 1346 | vcore = kvm->arch.vcores[core]; |
| 1347 | if (!vcore) { |
| 1348 | vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); |
| 1349 | if (vcore) { |
| 1350 | INIT_LIST_HEAD(&vcore->runnable_threads); |
| 1351 | spin_lock_init(&vcore->lock); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1352 | init_waitqueue_head(&vcore->wq); |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1353 | vcore->preempt_tb = TB_NIL; |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 1354 | vcore->lpcr = kvm->arch.lpcr; |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1355 | vcore->first_vcpuid = core * threads_per_subcore; |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1356 | vcore->kvm = kvm; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1357 | } |
| 1358 | kvm->arch.vcores[core] = vcore; |
Paul Mackerras | 1b400ba | 2012-11-21 23:28:08 +0000 | [diff] [blame] | 1359 | kvm->arch.online_vcores++; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1360 | } |
| 1361 | mutex_unlock(&kvm->lock); |
| 1362 | |
| 1363 | if (!vcore) |
| 1364 | goto free_vcpu; |
| 1365 | |
| 1366 | spin_lock(&vcore->lock); |
| 1367 | ++vcore->num_threads; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1368 | spin_unlock(&vcore->lock); |
| 1369 | vcpu->arch.vcore = vcore; |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1370 | vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1371 | |
Alexander Graf | af8f38b | 2011-08-10 13:57:08 +0200 | [diff] [blame] | 1372 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
| 1373 | kvmppc_sanity_check(vcpu); |
| 1374 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1375 | return vcpu; |
| 1376 | |
| 1377 | free_vcpu: |
Sasha Levin | 6b75e6b | 2011-12-07 10:24:56 +0200 | [diff] [blame] | 1378 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1379 | out: |
| 1380 | return ERR_PTR(err); |
| 1381 | } |
| 1382 | |
Paul Mackerras | c35635e | 2013-04-18 19:51:04 +0000 | [diff] [blame] | 1383 | static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) |
| 1384 | { |
| 1385 | if (vpa->pinned_addr) |
| 1386 | kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, |
| 1387 | vpa->dirty); |
| 1388 | } |
| 1389 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1390 | static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1391 | { |
Paul Mackerras | 2e25aa5 | 2012-02-19 17:46:32 +0000 | [diff] [blame] | 1392 | spin_lock(&vcpu->arch.vpa_update_lock); |
Paul Mackerras | c35635e | 2013-04-18 19:51:04 +0000 | [diff] [blame] | 1393 | unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); |
| 1394 | unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); |
| 1395 | unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); |
Paul Mackerras | 2e25aa5 | 2012-02-19 17:46:32 +0000 | [diff] [blame] | 1396 | spin_unlock(&vcpu->arch.vpa_update_lock); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1397 | kvm_vcpu_uninit(vcpu); |
Sasha Levin | 6b75e6b | 2011-12-07 10:24:56 +0200 | [diff] [blame] | 1398 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1399 | } |
| 1400 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1401 | static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) |
| 1402 | { |
| 1403 | /* Indicate we want to get back into the guest */ |
| 1404 | return 1; |
| 1405 | } |
| 1406 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1407 | static void kvmppc_set_timer(struct kvm_vcpu *vcpu) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1408 | { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1409 | unsigned long dec_nsec, now; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1410 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1411 | now = get_tb(); |
| 1412 | if (now > vcpu->arch.dec_expires) { |
| 1413 | /* decrementer has already gone negative */ |
| 1414 | kvmppc_core_queue_dec(vcpu); |
Scott Wood | 7e28e60e | 2011-11-08 18:23:20 -0600 | [diff] [blame] | 1415 | kvmppc_core_prepare_to_enter(vcpu); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1416 | return; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1417 | } |
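| | /* convert the remaining timebase ticks to nanoseconds for the hrtimer */ |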
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1418 | dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC |
| 1419 | / tb_ticks_per_sec; |
| 1420 | hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), |
| 1421 | HRTIMER_MODE_REL); |
| 1422 | vcpu->arch.timer_running = 1; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1423 | } |
| 1424 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1425 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu) |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1426 | { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1427 | vcpu->arch.ceded = 0; |
| 1428 | if (vcpu->arch.timer_running) { |
| 1429 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); |
| 1430 | vcpu->arch.timer_running = 0; |
| 1431 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1432 | } |
| 1433 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1434 | extern void __kvmppc_vcore_entry(void); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1435 | |
| 1436 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, |
| 1437 | struct kvm_vcpu *vcpu) |
| 1438 | { |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1439 | u64 now; |
| 1440 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1441 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
| 1442 | return; |
Paul Mackerras | bf3d32e | 2013-11-16 17:46:04 +1100 | [diff] [blame] | 1443 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1444 | now = mftb(); |
| 1445 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - |
| 1446 | vcpu->arch.stolen_logged; |
| 1447 | vcpu->arch.busy_preempt = now; |
| 1448 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
Paul Mackerras | bf3d32e | 2013-11-16 17:46:04 +1100 | [diff] [blame] | 1449 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1450 | --vc->n_runnable; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1451 | list_del(&vcpu->arch.run_list); |
| 1452 | } |
| 1453 | |
Paul Mackerras | f0888f7 | 2012-02-03 00:54:17 +0000 | [diff] [blame] | 1454 | static int kvmppc_grab_hwthread(int cpu) |
| 1455 | { |
| 1456 | struct paca_struct *tpaca; |
| 1457 | long timeout = 1000; |
| 1458 | |
| 1459 | tpaca = &paca[cpu]; |
| 1460 | |
| 1461 | /* Ensure the thread won't go into the kernel if it wakes */ |
| 1462 | tpaca->kvm_hstate.hwthread_req = 1; |
Paul Mackerras | 7b444c6 | 2012-10-15 01:16:14 +0000 | [diff] [blame] | 1463 | tpaca->kvm_hstate.kvm_vcpu = NULL; |
Paul Mackerras | f0888f7 | 2012-02-03 00:54:17 +0000 | [diff] [blame] | 1464 | |
| 1465 | /* |
| 1466 | * If the thread is already executing in the kernel (e.g. handling |
| 1467 | * a stray interrupt), wait for it to get back to nap mode. |
| 1468 | * The smp_mb() is to ensure that our setting of hwthread_req |
| 1469 | * is visible before we look at hwthread_state, so if this |
| 1470 | * races with the code at system_reset_pSeries and the thread |
| 1471 | * misses our setting of hwthread_req, we are sure to see its |
| 1472 | * setting of hwthread_state, and vice versa. |
| 1473 | */ |
| 1474 | smp_mb(); |
| 1475 | while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { |
| 1476 | if (--timeout <= 0) { |
| 1477 | pr_err("KVM: couldn't grab cpu %d\n", cpu); |
| 1478 | return -EBUSY; |
| 1479 | } |
| 1480 | udelay(1); |
| 1481 | } |
| 1482 | return 0; |
| 1483 | } |
| 1484 | |
| 1485 | static void kvmppc_release_hwthread(int cpu) |
| 1486 | { |
| 1487 | struct paca_struct *tpaca; |
| 1488 | |
| 1489 | tpaca = &paca[cpu]; |
| 1490 | tpaca->kvm_hstate.hwthread_req = 0; |
| 1491 | tpaca->kvm_hstate.kvm_vcpu = NULL; |
| 1492 | } |
| 1493 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1494 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu) |
| 1495 | { |
| 1496 | int cpu; |
| 1497 | struct paca_struct *tpaca; |
| 1498 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
| 1499 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1500 | if (vcpu->arch.timer_running) { |
| 1501 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); |
| 1502 | vcpu->arch.timer_running = 0; |
| 1503 | } |
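| | /* run this vcpu on hardware thread ptid within the core starting at vc->pcpu */ |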
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1504 | cpu = vc->pcpu + vcpu->arch.ptid; |
| 1505 | tpaca = &paca[cpu]; |
| 1506 | tpaca->kvm_hstate.kvm_vcpu = vcpu; |
| 1507 | tpaca->kvm_hstate.kvm_vcore = vc; |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1508 | tpaca->kvm_hstate.ptid = vcpu->arch.ptid; |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1509 | vcpu->cpu = vc->pcpu; |
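| | /* make the vcore/vcpu assignments visible before waking the target thread */ |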
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1510 | smp_wmb(); |
Michael Neuling | 251da03 | 2011-11-10 16:03:20 +0000 | [diff] [blame] | 1511 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1512 | if (cpu != smp_processor_id()) { |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1513 | xics_wake_cpu(cpu); |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1514 | if (vcpu->arch.ptid) |
| 1515 | ++vc->n_woken; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1516 | } |
| 1517 | #endif |
| 1518 | } |
| 1519 | |
| 1520 | static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc) |
| 1521 | { |
| 1522 | int i; |
| 1523 | |
| 1524 | HMT_low(); |
| 1525 | i = 0; |
| 1526 | while (vc->nap_count < vc->n_woken) { |
| 1527 | if (++i >= 1000000) { |
| 1528 | pr_err("kvmppc_wait_for_nap timeout %d %d\n", |
| 1529 | vc->nap_count, vc->n_woken); |
| 1530 | break; |
| 1531 | } |
| 1532 | cpu_relax(); |
| 1533 | } |
| 1534 | HMT_medium(); |
| 1535 | } |
| 1536 | |
| 1537 | /* |
| 1538 | * Check that we are on thread 0 and that any other threads in |
Paul Mackerras | 7b444c6 | 2012-10-15 01:16:14 +0000 | [diff] [blame] | 1539 | * this core are off-line. Then grab the threads so they can't |
| 1540 | * enter the kernel. |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1541 | */ |
| 1542 | static int on_primary_thread(void) |
| 1543 | { |
| 1544 | int cpu = smp_processor_id(); |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1545 | int thr; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1546 | |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1547 | /* Are we on a primary subcore? */ |
| 1548 | if (cpu_thread_in_subcore(cpu)) |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1549 | return 0; |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1550 | |
| 1551 | thr = 0; |
| 1552 | while (++thr < threads_per_subcore) |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1553 | if (cpu_online(cpu + thr)) |
| 1554 | return 0; |
Paul Mackerras | 7b444c6 | 2012-10-15 01:16:14 +0000 | [diff] [blame] | 1555 | |
| 1556 | /* Grab all hw threads so they can't go into the kernel */ |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1557 | for (thr = 1; thr < threads_per_subcore; ++thr) { |
Paul Mackerras | 7b444c6 | 2012-10-15 01:16:14 +0000 | [diff] [blame] | 1558 | if (kvmppc_grab_hwthread(cpu + thr)) { |
| 1559 | /* Couldn't grab one; let the others go */ |
| 1560 | do { |
| 1561 | kvmppc_release_hwthread(cpu + thr); |
| 1562 | } while (--thr > 0); |
| 1563 | return 0; |
| 1564 | } |
| 1565 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1566 | return 1; |
| 1567 | } |
| 1568 | |
| 1569 | /* |
| 1570 | * Run a set of guest threads on a physical core. |
| 1571 | * Called with vc->lock held. |
| 1572 | */ |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1573 | static void kvmppc_run_core(struct kvmppc_vcore *vc) |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1574 | { |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1575 | struct kvm_vcpu *vcpu, *vnext; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1576 | long ret; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1577 | u64 now; |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1578 | int i, need_vpa_update; |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 1579 | int srcu_idx; |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1580 | struct kvm_vcpu *vcpus_to_update[threads_per_core]; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1581 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1582 | /* don't start if any threads have a signal pending */ |
Paul Mackerras | 081f323 | 2012-06-01 20:20:24 +1000 | [diff] [blame] | 1583 | need_vpa_update = 0; |
| 1584 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1585 | if (signal_pending(vcpu->arch.run_task)) |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1586 | return; |
| 1587 | if (vcpu->arch.vpa.update_pending || |
| 1588 | vcpu->arch.slb_shadow.update_pending || |
| 1589 | vcpu->arch.dtl.update_pending) |
| 1590 | vcpus_to_update[need_vpa_update++] = vcpu; |
Paul Mackerras | 081f323 | 2012-06-01 20:20:24 +1000 | [diff] [blame] | 1591 | } |
| 1592 | |
| 1593 | /* |
| 1594 | * Initialize *vc, in particular vc->vcore_state, so we can |
| 1595 | * drop the vcore lock if necessary. |
| 1596 | */ |
| 1597 | vc->n_woken = 0; |
| 1598 | vc->nap_count = 0; |
| 1599 | vc->entry_exit_count = 0; |
Paul Mackerras | 2f12f03 | 2012-10-15 01:17:17 +0000 | [diff] [blame] | 1600 | vc->vcore_state = VCORE_STARTING; |
Paul Mackerras | 081f323 | 2012-06-01 20:20:24 +1000 | [diff] [blame] | 1601 | vc->in_guest = 0; |
| 1602 | vc->napping_threads = 0; |
| 1603 | |
| 1604 | /* |
| 1605 | * Updating any of the vpas requires calling kvmppc_pin_guest_page, |
| 1606 | * which can't be called with any spinlocks held. |
| 1607 | */ |
| 1608 | if (need_vpa_update) { |
| 1609 | spin_unlock(&vc->lock); |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1610 | for (i = 0; i < need_vpa_update; ++i) |
| 1611 | kvmppc_update_vpas(vcpus_to_update[i]); |
Paul Mackerras | 081f323 | 2012-06-01 20:20:24 +1000 | [diff] [blame] | 1612 | spin_lock(&vc->lock); |
| 1613 | } |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1614 | |
| 1615 | /* |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1616 | * Make sure we are running on primary threads, and that secondary |
| 1617 | * threads are offline.  Also check that the number of threads in this |
| 1618 | * virtual core does not exceed the host's threads per subcore. |
Paul Mackerras | 7b444c6 | 2012-10-15 01:16:14 +0000 | [diff] [blame] | 1619 | */ |
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1620 | if ((threads_per_core > 1) && |
| 1621 | ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { |
Paul Mackerras | 7b444c6 | 2012-10-15 01:16:14 +0000 | [diff] [blame] | 1622 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) |
| 1623 | vcpu->arch.ret = -EBUSY; |
| 1624 | goto out; |
| 1625 | } |
| 1626 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1628 | vc->pcpu = smp_processor_id(); |
Paul Mackerras | 2e25aa5 | 2012-02-19 17:46:32 +0000 | [diff] [blame] | 1629 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1630 | kvmppc_start_thread(vcpu); |
Paul Mackerras | 0456ec4 | 2012-02-03 00:56:21 +0000 | [diff] [blame] | 1631 | kvmppc_create_dtl_entry(vcpu, vc); |
Paul Mackerras | 2e25aa5 | 2012-02-19 17:46:32 +0000 | [diff] [blame] | 1632 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1633 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1634 | /* Set this explicitly in case thread 0 doesn't have a vcpu */ |
| 1635 | get_paca()->kvm_hstate.kvm_vcore = vc; |
| 1636 | get_paca()->kvm_hstate.ptid = 0; |
| 1637 | |
Paul Mackerras | 2f12f03 | 2012-10-15 01:17:17 +0000 | [diff] [blame] | 1638 | vc->vcore_state = VCORE_RUNNING; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1639 | preempt_disable(); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1640 | spin_unlock(&vc->lock); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1641 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1642 | kvm_guest_enter(); |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 1643 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1644 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 1645 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1646 | __kvmppc_vcore_entry(); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1647 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1648 | spin_lock(&vc->lock); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1649 | /* disable sending of IPIs on virtual external irqs */ |
| 1650 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) |
| 1651 | vcpu->cpu = -1; |
| 1652 | /* wait for secondary threads to finish writing their state to memory */ |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1653 | if (vc->nap_count < vc->n_woken) |
| 1654 | kvmppc_wait_for_nap(vc); |
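| | /* let the hardware threads go back into the host kernel */ |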
Michael Ellerman | 3102f78 | 2014-05-23 18:15:29 +1000 | [diff] [blame] | 1655 | for (i = 0; i < threads_per_subcore; ++i) |
Paul Mackerras | 2f12f03 | 2012-10-15 01:17:17 +0000 | [diff] [blame] | 1656 | kvmppc_release_hwthread(vc->pcpu + i); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1657 | /* prevent other vcpu threads from doing kvmppc_start_thread() now */ |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1658 | vc->vcore_state = VCORE_EXITING; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1659 | spin_unlock(&vc->lock); |
| 1660 | |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 1661 | srcu_read_unlock(&vc->kvm->srcu, srcu_idx); |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 1662 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1663 | /* make sure updates to secondary vcpu structs are visible now */ |
| 1664 | smp_mb(); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1665 | kvm_guest_exit(); |
| 1666 | |
| 1667 | preempt_enable(); |
Takuya Yoshikawa | c08ac06 | 2013-12-13 15:07:21 +0900 | [diff] [blame] | 1668 | cond_resched(); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1669 | |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1670 | spin_lock(&vc->lock); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1671 | now = get_tb(); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1672 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { |
| 1673 | /* cancel pending dec exception if dec is positive */ |
| 1674 | if (now < vcpu->arch.dec_expires && |
| 1675 | kvmppc_core_pending_dec(vcpu)) |
| 1676 | kvmppc_core_dequeue_dec(vcpu); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1677 | |
| 1678 | ret = RESUME_GUEST; |
| 1679 | if (vcpu->arch.trap) |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1680 | ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, |
| 1681 | vcpu->arch.run_task); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1682 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1683 | vcpu->arch.ret = ret; |
| 1684 | vcpu->arch.trap = 0; |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1685 | |
| 1686 | if (vcpu->arch.ceded) { |
Greg Kurz | e59d24e | 2014-02-06 17:36:56 +0100 | [diff] [blame] | 1687 | if (!is_kvmppc_resume_guest(ret)) |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1688 | kvmppc_end_cede(vcpu); |
| 1689 | else |
| 1690 | kvmppc_set_timer(vcpu); |
| 1691 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1692 | } |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1693 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1694 | out: |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1695 | vc->vcore_state = VCORE_INACTIVE; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1696 | list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, |
| 1697 | arch.run_list) { |
Greg Kurz | e59d24e | 2014-02-06 17:36:56 +0100 | [diff] [blame] | 1698 | if (!is_kvmppc_resume_guest(vcpu->arch.ret)) { |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1699 | kvmppc_remove_runnable(vc, vcpu); |
| 1700 | wake_up(&vcpu->arch.cpu_run); |
| 1701 | } |
| 1702 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1703 | } |
| 1704 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1705 | /* |
| 1706 | * Wait for some other vcpu thread to execute us, and |
| 1707 | * wake us up when we need to handle something in the host. |
| 1708 | */ |
| 1709 | static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state) |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1710 | { |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1711 | DEFINE_WAIT(wait); |
| 1712 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1713 | prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); |
| 1714 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) |
| 1715 | schedule(); |
| 1716 | finish_wait(&vcpu->arch.cpu_run, &wait); |
| 1717 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1718 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1719 | /* |
| 1720 | * All the vcpus in this vcore are idle, so wait for a decrementer |
| 1721 | * or external interrupt to one of the vcpus. vc->lock is held. |
| 1722 | */ |
| 1723 | static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) |
| 1724 | { |
| 1725 | DEFINE_WAIT(wait); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1726 | |
| 1727 | prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); |
| 1728 | vc->vcore_state = VCORE_SLEEPING; |
| 1729 | spin_unlock(&vc->lock); |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1730 | schedule(); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1731 | finish_wait(&vc->wq, &wait); |
| 1732 | spin_lock(&vc->lock); |
| 1733 | vc->vcore_state = VCORE_INACTIVE; |
| 1734 | } |
| 1735 | |
| 1736 | static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
| 1737 | { |
| 1738 | int n_ceded; |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1739 | struct kvmppc_vcore *vc; |
| 1740 | struct kvm_vcpu *v, *vn; |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1741 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1742 | kvm_run->exit_reason = 0; |
| 1743 | vcpu->arch.ret = RESUME_GUEST; |
| 1744 | vcpu->arch.trap = 0; |
Paul Mackerras | 2f12f03 | 2012-10-15 01:17:17 +0000 | [diff] [blame] | 1745 | kvmppc_update_vpas(vcpu); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1746 | |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1747 | /* |
| 1748 | * Synchronize with other threads in this virtual core |
| 1749 | */ |
| 1750 | vc = vcpu->arch.vcore; |
| 1751 | spin_lock(&vc->lock); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1752 | vcpu->arch.ceded = 0; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1753 | vcpu->arch.run_task = current; |
| 1754 | vcpu->arch.kvm_run = kvm_run; |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1755 | vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1756 | vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1757 | vcpu->arch.busy_preempt = TB_NIL; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1758 | list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); |
| 1759 | ++vc->n_runnable; |
| 1760 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1761 | /* |
| 1762 | * This happens the first time this is called for a vcpu. |
| 1763 | * If the vcore is already running, we may be able to start |
| 1764 | * this thread straight away and have it join in. |
| 1765 | */ |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1766 | if (!signal_pending(current)) { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1767 | if (vc->vcore_state == VCORE_RUNNING && |
| 1768 | VCORE_EXIT_COUNT(vc) == 0) { |
Paul Mackerras | 2f12f03 | 2012-10-15 01:17:17 +0000 | [diff] [blame] | 1769 | kvmppc_create_dtl_entry(vcpu, vc); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1770 | kvmppc_start_thread(vcpu); |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1771 | } else if (vc->vcore_state == VCORE_SLEEPING) { |
| 1772 | wake_up(&vc->wq); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1773 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1774 | |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1775 | } |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1776 | |
| 1777 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
| 1778 | !signal_pending(current)) { |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1779 | if (vc->vcore_state != VCORE_INACTIVE) { |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1780 | spin_unlock(&vc->lock); |
| 1781 | kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); |
| 1782 | spin_lock(&vc->lock); |
| 1783 | continue; |
| 1784 | } |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1785 | list_for_each_entry_safe(v, vn, &vc->runnable_threads, |
| 1786 | arch.run_list) { |
Scott Wood | 7e28e60e | 2011-11-08 18:23:20 -0600 | [diff] [blame] | 1787 | kvmppc_core_prepare_to_enter(v); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1788 | if (signal_pending(v->arch.run_task)) { |
| 1789 | kvmppc_remove_runnable(vc, v); |
| 1790 | v->stat.signal_exits++; |
| 1791 | v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; |
| 1792 | v->arch.ret = -EINTR; |
| 1793 | wake_up(&v->arch.cpu_run); |
| 1794 | } |
| 1795 | } |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1796 | if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
| 1797 | break; |
| 1798 | vc->runner = vcpu; |
| 1799 | n_ceded = 0; |
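| | /* count ceded vcpus with nothing pending; un-cede any with pending exceptions */ |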
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 1800 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1801 | if (!v->arch.pending_exceptions) |
| 1802 | n_ceded += v->arch.ceded; |
Paul Mackerras | 4619ac8 | 2013-04-17 20:31:41 +0000 | [diff] [blame] | 1803 | else |
| 1804 | v->arch.ceded = 0; |
| 1805 | } |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1806 | if (n_ceded == vc->n_runnable) |
| 1807 | kvmppc_vcore_blocked(vc); |
| 1808 | else |
| 1809 | kvmppc_run_core(vc); |
Paul Mackerras | 0456ec4 | 2012-02-03 00:56:21 +0000 | [diff] [blame] | 1810 | vc->runner = NULL; |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1811 | } |
| 1812 | |
Paul Mackerras | 8455d79 | 2012-10-15 01:17:42 +0000 | [diff] [blame] | 1813 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
| 1814 | (vc->vcore_state == VCORE_RUNNING || |
| 1815 | vc->vcore_state == VCORE_EXITING)) { |
| 1816 | spin_unlock(&vc->lock); |
| 1817 | kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); |
| 1818 | spin_lock(&vc->lock); |
| 1819 | } |
| 1820 | |
| 1821 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { |
| 1822 | kvmppc_remove_runnable(vc, vcpu); |
| 1823 | vcpu->stat.signal_exits++; |
| 1824 | kvm_run->exit_reason = KVM_EXIT_INTR; |
| 1825 | vcpu->arch.ret = -EINTR; |
| 1826 | } |
| 1827 | |
| 1828 | if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { |
| 1829 | /* Wake up some vcpu to run the core */ |
| 1830 | v = list_first_entry(&vc->runnable_threads, |
| 1831 | struct kvm_vcpu, arch.run_list); |
| 1832 | wake_up(&v->arch.cpu_run); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1833 | } |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1834 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1835 | spin_unlock(&vc->lock); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 1836 | return vcpu->arch.ret; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 1837 | } |
| 1838 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1839 | static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1840 | { |
| 1841 | int r; |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1842 | int srcu_idx; |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1843 | |
Alexander Graf | af8f38b | 2011-08-10 13:57:08 +0200 | [diff] [blame] | 1844 | if (!vcpu->arch.sane) { |
| 1845 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 1846 | return -EINVAL; |
| 1847 | } |
| 1848 | |
Scott Wood | 25051b5 | 2011-11-08 18:23:23 -0600 | [diff] [blame] | 1849 | kvmppc_core_prepare_to_enter(vcpu); |
| 1850 | |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1851 | /* No need to go into the guest when all we'll do is come back out */ |
| 1852 | if (signal_pending(current)) { |
| 1853 | run->exit_reason = KVM_EXIT_INTR; |
| 1854 | return -EINTR; |
| 1855 | } |
| 1856 | |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 1857 | atomic_inc(&vcpu->kvm->arch.vcpus_running); |
| 1858 | /* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */ |
| 1859 | smp_mb(); |
| 1860 | |
| 1861 | /* On the first time here, set up HTAB and VRMA or RMA */ |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 1862 | if (!vcpu->kvm->arch.rma_setup_done) { |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 1863 | r = kvmppc_hv_setup_htab_rma(vcpu); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 1864 | if (r) |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 1865 | goto out; |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 1866 | } |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1867 | |
| 1868 | flush_fp_to_thread(current); |
| 1869 | flush_altivec_to_thread(current); |
| 1870 | flush_vsx_to_thread(current); |
| 1871 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1872 | vcpu->arch.pgdir = current->mm->pgd; |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1873 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 1874 | |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1875 | do { |
| 1876 | r = kvmppc_run_vcpu(run, vcpu); |
| 1877 | |
| 1878 | if (run->exit_reason == KVM_EXIT_PAPR_HCALL && |
| 1879 | !(vcpu->arch.shregs.msr & MSR_PR)) { |
| 1880 | r = kvmppc_pseries_do_hcall(vcpu); |
Scott Wood | 7e28e60e | 2011-11-08 18:23:20 -0600 | [diff] [blame] | 1881 | kvmppc_core_prepare_to_enter(vcpu); |
Paul Mackerras | 913d3ff9a | 2012-10-15 01:16:48 +0000 | [diff] [blame] | 1882 | } else if (r == RESUME_PAGE_FAULT) { |
| 1883 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
| 1884 | r = kvmppc_book3s_hv_page_fault(run, vcpu, |
| 1885 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); |
| 1886 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1887 | } |
Greg Kurz | e59d24e | 2014-02-06 17:36:56 +0100 | [diff] [blame] | 1888 | } while (is_kvmppc_resume_guest(r)); |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 1889 | |
| 1890 | out: |
Paul Mackerras | c7b6767 | 2012-10-15 01:18:07 +0000 | [diff] [blame] | 1891 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 1892 | atomic_dec(&vcpu->kvm->arch.vcpus_running); |
Paul Mackerras | a8606e2 | 2011-06-29 00:22:05 +0000 | [diff] [blame] | 1893 | return r; |
| 1894 | } |
| 1895 | |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 1896 | |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1897 | /* Work out RMLS (real mode limit selector) field value for a given RMA size. |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1898 | Assumes POWER7 or PPC970. */ |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1899 | static inline int lpcr_rmls(unsigned long rma_size) |
| 1900 | { |
| 1901 | switch (rma_size) { |
| 1902 | case 32ul << 20: /* 32 MB */ |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 1903 | if (cpu_has_feature(CPU_FTR_ARCH_206)) |
| 1904 | return 8; /* only supported on POWER7 */ |
| 1905 | return -1; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1906 | case 64ul << 20: /* 64 MB */ |
| 1907 | return 3; |
| 1908 | case 128ul << 20: /* 128 MB */ |
| 1909 | return 7; |
| 1910 | case 256ul << 20: /* 256 MB */ |
| 1911 | return 4; |
| 1912 | case 1ul << 30: /* 1 GB */ |
| 1913 | return 2; |
| 1914 | case 16ul << 30: /* 16 GB */ |
| 1915 | return 1; |
| 1916 | case 256ul << 30: /* 256 GB */ |
| 1917 | return 0; |
| 1918 | default: |
| 1919 | return -1; |
| 1920 | } |
| 1921 | } |
| 1922 | |
| 1923 | static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 1924 | { |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1925 | struct page *page; |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 1926 | struct kvm_rma_info *ri = vma->vm_file->private_data; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1927 | |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 1928 | if (vmf->pgoff >= kvm_rma_pages) |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1929 | return VM_FAULT_SIGBUS; |
| 1930 | |
| 1931 | page = pfn_to_page(ri->base_pfn + vmf->pgoff); |
| 1932 | get_page(page); |
| 1933 | vmf->page = page; |
| 1934 | return 0; |
| 1935 | } |
| 1936 | |
| 1937 | static const struct vm_operations_struct kvm_rma_vm_ops = { |
| 1938 | .fault = kvm_rma_fault, |
| 1939 | }; |
| 1940 | |
| 1941 | static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma) |
| 1942 | { |
Konstantin Khlebnikov | 314e51b | 2012-10-08 16:29:02 -0700 | [diff] [blame] | 1943 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1944 | vma->vm_ops = &kvm_rma_vm_ops; |
| 1945 | return 0; |
| 1946 | } |
| 1947 | |
| 1948 | static int kvm_rma_release(struct inode *inode, struct file *filp) |
| 1949 | { |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 1950 | struct kvm_rma_info *ri = filp->private_data; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1951 | |
| 1952 | kvm_release_rma(ri); |
| 1953 | return 0; |
| 1954 | } |
| 1955 | |
Al Viro | 75ef9de | 2013-04-04 19:09:41 -0400 | [diff] [blame] | 1956 | static const struct file_operations kvm_rma_fops = { |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1957 | .mmap = kvm_rma_mmap, |
| 1958 | .release = kvm_rma_release, |
| 1959 | }; |
| 1960 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 1961 | static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, |
| 1962 | struct kvm_allocate_rma *ret) |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1963 | { |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1964 | long fd; |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 1965 | struct kvm_rma_info *ri; |
| 1966 | /* |
| 1967 | * Only do this on PPC970 in HV mode |
| 1968 | */ |
| 1969 | if (!cpu_has_feature(CPU_FTR_HVMODE) || |
| 1970 | !cpu_has_feature(CPU_FTR_ARCH_201)) |
| 1971 | return -EINVAL; |
| 1972 | |
| 1973 | if (!kvm_rma_pages) |
| 1974 | return -EINVAL; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1975 | |
| 1976 | ri = kvm_alloc_rma(); |
| 1977 | if (!ri) |
| 1978 | return -ENOMEM; |
| 1979 | |
Yann Droneaud | 2f84d5e | 2013-08-24 22:14:08 +0200 | [diff] [blame] | 1980 | fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC); |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1981 | if (fd < 0) |
| 1982 | kvm_release_rma(ri); |
| 1983 | |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 1984 | ret->rma_size = kvm_rma_pages << PAGE_SHIFT; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 1985 | return fd; |
| 1986 | } |
| 1987 | |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 1988 | static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, |
| 1989 | int linux_psize) |
| 1990 | { |
| 1991 | struct mmu_psize_def *def = &mmu_psize_defs[linux_psize]; |
| 1992 | |
| 1993 | if (!def->shift) |
| 1994 | return; |
| 1995 | (*sps)->page_shift = def->shift; |
| 1996 | (*sps)->slb_enc = def->sllp; |
| 1997 | (*sps)->enc[0].page_shift = def->shift; |
Aneesh Kumar K.V | b1022fb | 2013-04-28 09:37:35 +0000 | [diff] [blame] | 1998 | /* |
| 1999 | * Only return the base page encoding. We don't want to return |
| 2000 | * all the supported pte_enc values, because our H_ENTER doesn't |
| 2001 | * support MPSS yet. Once it does, we can start passing all the |
| 2002 | * supported pte_enc values here |
| 2003 | */ |
| 2004 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; |
Aneesh Kumar K.V | 1f365bb | 2014-05-06 23:31:36 +0530 | [diff] [blame] | 2005 | /* |
| 2006 | * Add 16MB MPSS support if host supports it |
| 2007 | */ |
| 2008 | if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) { |
| 2009 | (*sps)->enc[1].page_shift = 24; |
| 2010 | (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M]; |
| 2011 | } |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2012 | (*sps)++; |
| 2013 | } |
| 2014 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2015 | static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, |
| 2016 | struct kvm_ppc_smmu_info *info) |
Benjamin Herrenschmidt | 5b74716 | 2012-04-26 19:43:42 +0000 | [diff] [blame] | 2017 | { |
| 2018 | struct kvm_ppc_one_seg_page_size *sps; |
| 2019 | |
| 2020 | info->flags = KVM_PPC_PAGE_SIZES_REAL; |
| 2021 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
| 2022 | info->flags |= KVM_PPC_1T_SEGMENTS; |
| 2023 | info->slb_size = mmu_slb_size; |
| 2024 | |
| 2025 | /* We only support these sizes for now, and no multi-size segments */ |
| 2026 | sps = &info->sps[0]; |
| 2027 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K); |
| 2028 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K); |
| 2029 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M); |
| 2030 | |
| 2031 | return 0; |
| 2032 | } |
| 2033 | |
Paul Mackerras | 82ed361 | 2011-12-15 02:03:22 +0000 | [diff] [blame] | 2034 | /* |
| 2035 | * Get (and clear) the dirty memory log for a memory slot. |
| 2036 | */ |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2037 | static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, |
| 2038 | struct kvm_dirty_log *log) |
Paul Mackerras | 82ed361 | 2011-12-15 02:03:22 +0000 | [diff] [blame] | 2039 | { |
| 2040 | struct kvm_memory_slot *memslot; |
| 2041 | int r; |
| 2042 | unsigned long n; |
| 2043 | |
| 2044 | mutex_lock(&kvm->slots_lock); |
| 2045 | |
| 2046 | r = -EINVAL; |
Alex Williamson | bbacc0c | 2012-12-10 10:33:09 -0700 | [diff] [blame] | 2047 | if (log->slot >= KVM_USER_MEM_SLOTS) |
Paul Mackerras | 82ed361 | 2011-12-15 02:03:22 +0000 | [diff] [blame] | 2048 | goto out; |
| 2049 | |
| 2050 | memslot = id_to_memslot(kvm->memslots, log->slot); |
| 2051 | r = -ENOENT; |
| 2052 | if (!memslot->dirty_bitmap) |
| 2053 | goto out; |
| 2054 | |
| 2055 | n = kvm_dirty_bitmap_bytes(memslot); |
| 2056 | memset(memslot->dirty_bitmap, 0, n); |
| 2057 | |
Paul Mackerras | dfe49db | 2012-09-11 13:28:18 +0000 | [diff] [blame] | 2058 | r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap); |
Paul Mackerras | 82ed361 | 2011-12-15 02:03:22 +0000 | [diff] [blame] | 2059 | if (r) |
| 2060 | goto out; |
| 2061 | |
| 2062 | r = -EFAULT; |
| 2063 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) |
| 2064 | goto out; |
| 2065 | |
| 2066 | r = 0; |
| 2067 | out: |
| 2068 | mutex_unlock(&kvm->slots_lock); |
| 2069 | return r; |
| 2070 | } |
| 2071 | |
Paul Mackerras | a66b48c | 2012-09-11 13:27:46 +0000 | [diff] [blame] | 2072 | static void unpin_slot(struct kvm_memory_slot *memslot) |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2073 | { |
| 2074 | unsigned long *physp; |
| 2075 | unsigned long j, npages, pfn; |
| 2076 | struct page *page; |
| 2077 | |
Paul Mackerras | a66b48c | 2012-09-11 13:27:46 +0000 | [diff] [blame] | 2078 | physp = memslot->arch.slot_phys; |
| 2079 | npages = memslot->npages; |
| 2080 | if (!physp) |
| 2081 | return; |
| 2082 | for (j = 0; j < npages; j++) { |
| 2083 | if (!(physp[j] & KVMPPC_GOT_PAGE)) |
| 2084 | continue; |
| 2085 | pfn = physp[j] >> PAGE_SHIFT; |
| 2086 | page = pfn_to_page(pfn); |
| 2087 | SetPageDirty(page); |
| 2088 | put_page(page); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2089 | } |
| 2090 | } |
| 2091 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2092 | static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, |
| 2093 | struct kvm_memory_slot *dont) |
Paul Mackerras | a66b48c | 2012-09-11 13:27:46 +0000 | [diff] [blame] | 2094 | { |
| 2095 | if (!dont || free->arch.rmap != dont->arch.rmap) { |
| 2096 | vfree(free->arch.rmap); |
| 2097 | free->arch.rmap = NULL; |
| 2098 | } |
| 2099 | if (!dont || free->arch.slot_phys != dont->arch.slot_phys) { |
| 2100 | unpin_slot(free); |
| 2101 | vfree(free->arch.slot_phys); |
| 2102 | free->arch.slot_phys = NULL; |
| 2103 | } |
| 2104 | } |
| 2105 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2106 | static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, |
| 2107 | unsigned long npages) |
Paul Mackerras | a66b48c | 2012-09-11 13:27:46 +0000 | [diff] [blame] | 2108 | { |
| 2109 | slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); |
| 2110 | if (!slot->arch.rmap) |
| 2111 | return -ENOMEM; |
| 2112 | slot->arch.slot_phys = NULL; |
| 2113 | |
| 2114 | return 0; |
| 2115 | } |
| 2116 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2117 | static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, |
| 2118 | struct kvm_memory_slot *memslot, |
| 2119 | struct kvm_userspace_memory_region *mem) |
Paul Mackerras | a66b48c | 2012-09-11 13:27:46 +0000 | [diff] [blame] | 2120 | { |
| 2121 | unsigned long *phys; |
| 2122 | |
| 2123 | /* Allocate a slot_phys array if needed */ |
| 2124 | phys = memslot->arch.slot_phys; |
| 2125 | if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) { |
| 2126 | phys = vzalloc(memslot->npages * sizeof(unsigned long)); |
| 2127 | if (!phys) |
| 2128 | return -ENOMEM; |
| 2129 | memslot->arch.slot_phys = phys; |
| 2130 | } |
| 2131 | |
| 2132 | return 0; |
| 2133 | } |
| 2134 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2135 | static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, |
| 2136 | struct kvm_userspace_memory_region *mem, |
| 2137 | const struct kvm_memory_slot *old) |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2138 | { |
Paul Mackerras | dfe49db | 2012-09-11 13:28:18 +0000 | [diff] [blame] | 2139 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; |
| 2140 | struct kvm_memory_slot *memslot; |
| 2141 | |
Takuya Yoshikawa | 8482644 | 2013-02-27 19:45:25 +0900 | [diff] [blame] | 2142 | if (npages && old->npages) { |
Paul Mackerras | dfe49db | 2012-09-11 13:28:18 +0000 | [diff] [blame] | 2143 | /* |
| 2144 | * If modifying a memslot, reset all the rmap dirty bits. |
| 2145 | * If this is a new memslot, we don't need to do anything |
| 2146 | * since the rmap array starts out as all zeroes, |
| 2147 | * i.e. no pages are dirty. |
| 2148 | */ |
| 2149 | memslot = id_to_memslot(kvm->memslots, mem->slot); |
| 2150 | kvmppc_hv_get_dirty_log(kvm, memslot, NULL); |
| 2151 | } |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2152 | } |
| 2153 | |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 2154 | /* |
| 2155 | * Update LPCR values in kvm->arch and in vcores. |
| 2156 | * Caller must hold kvm->lock. |
| 2157 | */ |
| 2158 | void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) |
| 2159 | { |
| 2160 | long int i; |
| 2161 | u32 cores_done = 0; |
| 2162 | |
| 2163 | if ((kvm->arch.lpcr & mask) == lpcr) |
| 2164 | return; |
| 2165 | |
| 2166 | kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; |
| 2167 | |
| 2168 | for (i = 0; i < KVM_MAX_VCORES; ++i) { |
| 2169 | struct kvmppc_vcore *vc = kvm->arch.vcores[i]; |
| 2170 | if (!vc) |
| 2171 | continue; |
| 2172 | spin_lock(&vc->lock); |
| 2173 | vc->lpcr = (vc->lpcr & ~mask) | lpcr; |
| 2174 | spin_unlock(&vc->lock); |
| 2175 | if (++cores_done >= kvm->arch.online_vcores) |
| 2176 | break; |
| 2177 | } |
| 2178 | } |
| 2179 | |
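/*
 * Illustrative sketch, not part of this file: a caller that wants to change
 * a single LPCR field for the whole VM takes kvm->lock and passes the new
 * bits plus a mask covering only that field.  LPCR_ILE is used here purely
 * as an example bit.
 */
static void example_set_lpcr_ile(struct kvm *kvm, bool ile)
{
	mutex_lock(&kvm->lock);
	kvmppc_update_lpcr(kvm, ile ? LPCR_ILE : 0, LPCR_ILE);
	mutex_unlock(&kvm->lock);
}
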
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2180 | static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) |
| 2181 | { |
| 2182 | return; |
| 2183 | } |
| 2184 | |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 2185 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2186 | { |
| 2187 | int err = 0; |
| 2188 | struct kvm *kvm = vcpu->kvm; |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 2189 | struct kvm_rma_info *ri = NULL; |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2190 | unsigned long hva; |
| 2191 | struct kvm_memory_slot *memslot; |
| 2192 | struct vm_area_struct *vma; |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 2193 | unsigned long lpcr = 0, senc; |
| 2194 | unsigned long lpcr_mask = 0; |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2195 | unsigned long psize, porder; |
| 2196 | unsigned long rma_size; |
| 2197 | unsigned long rmls; |
| 2198 | unsigned long *physp; |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2199 | unsigned long i, npages; |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2200 | int srcu_idx; |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2201 | |
| 2202 | mutex_lock(&kvm->lock); |
| 2203 | if (kvm->arch.rma_setup_done) |
| 2204 | goto out; /* another vcpu beat us to it */ |
| 2205 | |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 2206 | /* Allocate hashed page table (if not done already) and reset it */ |
| 2207 | if (!kvm->arch.hpt_virt) { |
| 2208 | err = kvmppc_alloc_hpt(kvm, NULL); |
| 2209 | if (err) { |
| 2210 | pr_err("KVM: Couldn't alloc HPT\n"); |
| 2211 | goto out; |
| 2212 | } |
| 2213 | } |
| 2214 | |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2215 | /* Look up the memslot for guest physical address 0 */ |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2216 | srcu_idx = srcu_read_lock(&kvm->srcu); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2217 | memslot = gfn_to_memslot(kvm, 0); |
| 2218 | |
| 2219 | /* We must have some memory at 0 by now */ |
| 2220 | err = -EINVAL; |
| 2221 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2222 | goto out_srcu; |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2223 | |
| 2224 | /* Look up the VMA for the start of this memory slot */ |
| 2225 | hva = memslot->userspace_addr; |
| 2226 | down_read(&current->mm->mmap_sem);
| 2227 | vma = find_vma(current->mm, hva); |
| 2228 | if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) |
| 2229 | goto up_out; |
| 2230 | |
| 2231 | psize = vma_kernel_pagesize(vma); |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2232 | porder = __ilog2(psize); |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2233 | |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2234 | /* Is this one of our preallocated RMAs? */ |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2235 | if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops && |
| 2236 | hva == vma->vm_start) |
| 2237 | ri = vma->vm_file->private_data; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2238 | |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2239 | up_read(&current->mm->mmap_sem);
| 2240 | |
| 2241 | if (!ri) { |
| 2242 | /* On POWER7, use VRMA; on PPC970, give up */ |
| 2243 | err = -EPERM; |
| 2244 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { |
| 2245 | pr_err("KVM: CPU requires an RMO\n"); |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2246 | goto out_srcu; |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2247 | } |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2248 | |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2249 | /* We can handle 4k, 64k or 16M pages in the VRMA */ |
| 2250 | err = -EINVAL; |
| 2251 | if (!(psize == 0x1000 || psize == 0x10000 || |
| 2252 | psize == 0x1000000)) |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2253 | goto out_srcu; |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2254 | |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2255 | /* Update VRMASD field in the LPCR */ |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2256 | senc = slb_pgsize_encoding(psize); |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2257 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
| 2258 | (VRMA_VSID << SLB_VSID_SHIFT_1T); |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 2259 | lpcr_mask = LPCR_VRMASD; |
| 2260 | /* the -4 is to account for senc values starting at 0x10 */ |
| 2261 | lpcr = senc << (LPCR_VRMASD_SH - 4); |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2262 | |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2263 | /* Create HPTEs in the hash page table for the VRMA */ |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2264 | kvmppc_map_vrma(vcpu, memslot, porder); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2265 | |
| 2266 | } else { |
| 2267 | /* Set up to use an RMO region */ |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 2268 | rma_size = kvm_rma_pages; |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2269 | if (rma_size > memslot->npages) |
| 2270 | rma_size = memslot->npages; |
| 2271 | rma_size <<= PAGE_SHIFT; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2272 | rmls = lpcr_rmls(rma_size); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2273 | err = -EINVAL; |
Chen Gang | 5d226ae | 2013-07-22 14:32:35 +0800 | [diff] [blame] | 2274 | if ((long)rmls < 0) { |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2275 | pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2276 | goto out_srcu; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2277 | } |
| 2278 | atomic_inc(&ri->use_count); |
| 2279 | kvm->arch.rma = ri; |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2280 | |
| 2281 | /* Update LPCR and RMOR */ |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2282 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { |
| 2283 | /* PPC970; insert RMLS value (split field) in HID4 */ |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 2284 | lpcr_mask = (1ul << HID4_RMLS0_SH) | |
| 2285 | (3ul << HID4_RMLS2_SH) | HID4_RMOR; |
| 2286 | lpcr = ((rmls >> 2) << HID4_RMLS0_SH) | |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2287 | ((rmls & 3) << HID4_RMLS2_SH); |
| 2288 | /* RMOR is also in HID4 */ |
| 2289 | lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) |
| 2290 | << HID4_RMOR_SH; |
| 2291 | } else { |
| 2292 | /* POWER7 */ |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 2293 | lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS; |
| 2294 | lpcr = rmls << LPCR_RMLS_SH; |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 2295 | kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2296 | } |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2297 | pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2298 | ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2299 | |
| 2300 | /* Initialize phys addrs of pages in RMO */ |
Aneesh Kumar K.V | 6c45b81 | 2013-07-02 11:15:17 +0530 | [diff] [blame] | 2301 | npages = kvm_rma_pages; |
Paul Mackerras | da9d1d7 | 2011-12-12 12:31:41 +0000 | [diff] [blame] | 2302 | porder = __ilog2(npages); |
Paul Mackerras | a66b48c | 2012-09-11 13:27:46 +0000 | [diff] [blame] | 2303 | physp = memslot->arch.slot_phys; |
| 2304 | if (physp) { |
| 2305 | if (npages > memslot->npages) |
| 2306 | npages = memslot->npages; |
| 2307 | spin_lock(&kvm->arch.slot_phys_lock); |
| 2308 | for (i = 0; i < npages; ++i) |
| 2309 | physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + |
| 2310 | porder; |
| 2311 | spin_unlock(&kvm->arch.slot_phys_lock); |
| 2312 | } |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2313 | } |
| 2314 | |
Paul Mackerras | a0144e2 | 2013-09-20 14:52:38 +1000 | [diff] [blame] | 2315 | kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); |
| 2316 | |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2317 | /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ |
| 2318 | smp_wmb(); |
| 2319 | kvm->arch.rma_setup_done = 1; |
| 2320 | err = 0; |
Paul Mackerras | 2c9097e | 2012-09-11 13:27:01 +0000 | [diff] [blame] | 2321 | out_srcu: |
| 2322 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2323 | out: |
| 2324 | mutex_unlock(&kvm->lock); |
| 2325 | return err; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2326 | |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2327 | up_out: |
| 2328 | up_read(&current->mm->mmap_sem);
Lai Jiangshan | 505d642 | 2013-03-16 00:50:49 +0800 | [diff] [blame] | 2329 | goto out_srcu; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2330 | } |
| 2331 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2332 | static int kvmppc_core_init_vm_hv(struct kvm *kvm) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2333 | { |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 2334 | unsigned long lpcr, lpid; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2335 | |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 2336 | /* Allocate the guest's logical partition ID */ |
| 2337 | |
| 2338 | lpid = kvmppc_alloc_lpid(); |
Chen Gang | 5d226ae | 2013-07-22 14:32:35 +0800 | [diff] [blame] | 2339 | if ((long)lpid < 0) |
Paul Mackerras | 32fad28 | 2012-05-04 02:32:53 +0000 | [diff] [blame] | 2340 | return -ENOMEM; |
| 2341 | kvm->arch.lpid = lpid; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2342 | |
Paul Mackerras | 1b400ba | 2012-11-21 23:28:08 +0000 | [diff] [blame] | 2343 | /* |
| 2344 | * Since we don't flush the TLB when tearing down a VM, |
| 2345 | * and this lpid might have previously been used, |
| 2346 | * make sure we flush on each core before running the new VM. |
| 2347 | */ |
| 2348 | cpumask_setall(&kvm->arch.need_tlb_flush); |
| 2349 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2350 | /* Start out with the default set of hcalls enabled */ |
| 2351 | memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, |
| 2352 | sizeof(kvm->arch.enabled_hcalls)); |
| 2353 | |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2354 | kvm->arch.rma = NULL; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2355 | |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2356 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2357 | |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2358 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { |
| 2359 | /* PPC970; HID4 is effectively the LPCR */ |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2360 | kvm->arch.host_lpid = 0; |
| 2361 | kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); |
| 2362 | lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH)); |
| 2363 | lpcr |= ((lpid >> 4) << HID4_LPID1_SH) | |
| 2364 | ((lpid & 0xf) << HID4_LPID5_SH); |
| 2365 | } else { |
| 2366 | /* POWER7; init LPCR for virtual RMA mode */ |
| 2367 | kvm->arch.host_lpid = mfspr(SPRN_LPID); |
| 2368 | kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); |
| 2369 | lpcr &= LPCR_PECE | LPCR_LPES; |
| 2370 | lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | |
Paul Mackerras | 697d389 | 2011-12-12 12:36:37 +0000 | [diff] [blame] | 2371 | LPCR_VPM0 | LPCR_VPM1; |
| 2372 | kvm->arch.vrma_slb_v = SLB_VSID_B_1T | |
| 2373 | (VRMA_VSID << SLB_VSID_SHIFT_1T); |
Paul Mackerras | e0622bd | 2014-01-08 21:25:27 +1100 | [diff] [blame] | 2374 | /* On POWER8, turn on the online bit to enable PURR/SPURR */
| 2375 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
| 2376 | lpcr |= LPCR_ONL; |
Paul Mackerras | 9e368f2 | 2011-06-29 00:40:08 +0000 | [diff] [blame] | 2377 | } |
| 2378 | kvm->arch.lpcr = lpcr; |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2379 | |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 2380 | kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206); |
Paul Mackerras | c77162d | 2011-12-12 12:31:00 +0000 | [diff] [blame] | 2381 | spin_lock_init(&kvm->arch.slot_phys_lock); |
Paul Mackerras | 512691d | 2012-10-15 01:15:41 +0000 | [diff] [blame] | 2382 | |
| 2383 | /* |
Michael Ellerman | 441c19c | 2014-05-23 18:15:25 +1000 | [diff] [blame] | 2384 | * Track that we now have a HV mode VM active. This blocks secondary |
| 2385 | * CPU threads from coming online. |
Paul Mackerras | 512691d | 2012-10-15 01:15:41 +0000 | [diff] [blame] | 2386 | */ |
Michael Ellerman | 441c19c | 2014-05-23 18:15:25 +1000 | [diff] [blame] | 2387 | kvm_hv_vm_activated(); |
Paul Mackerras | 512691d | 2012-10-15 01:15:41 +0000 | [diff] [blame] | 2388 | |
David Gibson | 54738c0 | 2011-06-29 00:22:41 +0000 | [diff] [blame] | 2389 | return 0; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2390 | } |
| 2391 | |
Paul Mackerras | f1378b1 | 2013-09-27 15:33:43 +0530 | [diff] [blame] | 2392 | static void kvmppc_free_vcores(struct kvm *kvm) |
| 2393 | { |
| 2394 | long int i; |
| 2395 | |
| 2396 | for (i = 0; i < KVM_MAX_VCORES; ++i) |
| 2397 | kfree(kvm->arch.vcores[i]); |
| 2398 | kvm->arch.online_vcores = 0; |
| 2399 | } |
| 2400 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2401 | static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2402 | { |
Michael Ellerman | 441c19c | 2014-05-23 18:15:25 +1000 | [diff] [blame] | 2403 | kvm_hv_vm_deactivated(); |
Paul Mackerras | 512691d | 2012-10-15 01:15:41 +0000 | [diff] [blame] | 2404 | |
Paul Mackerras | f1378b1 | 2013-09-27 15:33:43 +0530 | [diff] [blame] | 2405 | kvmppc_free_vcores(kvm); |
Paul Mackerras | aa04b4c | 2011-06-29 00:25:44 +0000 | [diff] [blame] | 2406 | if (kvm->arch.rma) { |
| 2407 | kvm_release_rma(kvm->arch.rma); |
| 2408 | kvm->arch.rma = NULL; |
| 2409 | } |
| 2410 | |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2411 | kvmppc_free_hpt(kvm); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2412 | } |
| 2413 | |
| 2414 | /* We don't need to emulate any privileged instructions or dcbz */ |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2415 | static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, |
| 2416 | unsigned int inst, int *advance) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2417 | { |
| 2418 | return EMULATE_FAIL; |
| 2419 | } |
| 2420 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2421 | static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, |
| 2422 | ulong spr_val) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2423 | { |
| 2424 | return EMULATE_FAIL; |
| 2425 | } |
| 2426 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2427 | static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, |
| 2428 | ulong *spr_val) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2429 | { |
| 2430 | return EMULATE_FAIL; |
| 2431 | } |
| 2432 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2433 | static int kvmppc_core_check_processor_compat_hv(void) |
| 2434 | { |
| 2435 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
| 2436 | return -EIO; |
| 2437 | return 0; |
| 2438 | } |
| 2439 | |
| 2440 | static long kvm_arch_vm_ioctl_hv(struct file *filp, |
| 2441 | unsigned int ioctl, unsigned long arg) |
| 2442 | { |
| 2443 | struct kvm *kvm __maybe_unused = filp->private_data; |
| 2444 | void __user *argp = (void __user *)arg; |
| 2445 | long r; |
| 2446 | |
| 2447 | switch (ioctl) { |
| 2448 | |
| 2449 | case KVM_ALLOCATE_RMA: { |
| 2450 | struct kvm_allocate_rma rma; |
| 2451 | struct kvm *kvm = filp->private_data; |
| 2452 | |
| 2453 | r = kvm_vm_ioctl_allocate_rma(kvm, &rma); |
| 2454 | if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) |
| 2455 | r = -EFAULT; |
| 2456 | break; |
| 2457 | } |
| 2458 | |
| 2459 | case KVM_PPC_ALLOCATE_HTAB: { |
| 2460 | u32 htab_order; |
| 2461 | |
| 2462 | r = -EFAULT; |
| 2463 | if (get_user(htab_order, (u32 __user *)argp)) |
| 2464 | break; |
| 2465 | r = kvmppc_alloc_reset_hpt(kvm, &htab_order); |
| 2466 | if (r) |
| 2467 | break; |
| 2468 | r = -EFAULT; |
| 2469 | if (put_user(htab_order, (u32 __user *)argp)) |
| 2470 | break; |
| 2471 | r = 0; |
| 2472 | break; |
| 2473 | } |
| 2474 | |
| 2475 | case KVM_PPC_GET_HTAB_FD: { |
| 2476 | struct kvm_get_htab_fd ghf; |
| 2477 | |
| 2478 | r = -EFAULT; |
| 2479 | if (copy_from_user(&ghf, argp, sizeof(ghf))) |
| 2480 | break; |
| 2481 | r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); |
| 2482 | break; |
| 2483 | } |
| 2484 | |
| 2485 | default: |
| 2486 | r = -ENOTTY; |
| 2487 | } |
| 2488 | |
| 2489 | return r; |
| 2490 | } |
| 2491 | |
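/*
 * Illustrative sketch, not part of this file: hypothetical userspace use of
 * the KVM_PPC_ALLOCATE_HTAB ioctl handled above.  The argument is a pointer
 * to a u32 holding the requested HPT order (log2 of the size in bytes); on
 * success the kernel writes back the order actually allocated.  Assumes
 * <linux/kvm.h> and <sys/ioctl.h>.
 */
static int example_alloc_hpt(int vm_fd, __u32 *htab_order)
{
	/* e.g. *htab_order = 24 requests a 16 MB hashed page table */
	return ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, htab_order);
}
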
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2492 | /* |
| 2493 | * List of hcall numbers to enable by default. |
| 2494 | * For compatibility with old userspace, we enable by default |
| 2495 | * all hcalls that were implemented before the hcall-enabling |
| 2496 | * facility was added. Note this list should not include H_RTAS. |
| 2497 | */ |
| 2498 | static unsigned int default_hcall_list[] = { |
| 2499 | H_REMOVE, |
| 2500 | H_ENTER, |
| 2501 | H_READ, |
| 2502 | H_PROTECT, |
| 2503 | H_BULK_REMOVE, |
| 2504 | H_GET_TCE, |
| 2505 | H_PUT_TCE, |
| 2506 | H_SET_DABR, |
| 2507 | H_SET_XDABR, |
| 2508 | H_CEDE, |
| 2509 | H_PROD, |
| 2510 | H_CONFER, |
| 2511 | H_REGISTER_VPA, |
| 2512 | #ifdef CONFIG_KVM_XICS |
| 2513 | H_EOI, |
| 2514 | H_CPPR, |
| 2515 | H_IPI, |
| 2516 | H_IPOLL, |
| 2517 | H_XIRR, |
| 2518 | H_XIRR_X, |
| 2519 | #endif |
| 2520 | 0 |
| 2521 | }; |
| 2522 | |
| 2523 | static void init_default_hcalls(void) |
| 2524 | { |
| 2525 | int i; |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2526 | unsigned int hcall; |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2527 | |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2528 | for (i = 0; default_hcall_list[i]; ++i) { |
| 2529 | hcall = default_hcall_list[i]; |
| 2530 | WARN_ON(!kvmppc_hcall_impl_hv(hcall)); |
| 2531 | __set_bit(hcall / 4, default_enabled_hcalls); |
| 2532 | } |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2533 | } |
| 2534 | |
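/*
 * Illustrative sketch, not part of this file: with the hcall-enabling
 * facility, userspace can turn an individual hcall on or off for a VM via
 * the KVM_ENABLE_CAP vm ioctl with KVM_CAP_PPC_ENABLE_HCALL; args[0] is the
 * hcall number and args[1] is nonzero to enable, zero to disable.  Assumes
 * <linux/kvm.h> and <sys/ioctl.h>.
 */
static int example_enable_hcall(int vm_fd, __u64 hcall, int enable)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PPC_ENABLE_HCALL,
		.args = { hcall, enable ? 1 : 0 },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
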
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2535 | static struct kvmppc_ops kvm_ops_hv = { |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2536 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, |
| 2537 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, |
| 2538 | .get_one_reg = kvmppc_get_one_reg_hv, |
| 2539 | .set_one_reg = kvmppc_set_one_reg_hv, |
| 2540 | .vcpu_load = kvmppc_core_vcpu_load_hv, |
| 2541 | .vcpu_put = kvmppc_core_vcpu_put_hv, |
| 2542 | .set_msr = kvmppc_set_msr_hv, |
| 2543 | .vcpu_run = kvmppc_vcpu_run_hv, |
| 2544 | .vcpu_create = kvmppc_core_vcpu_create_hv, |
| 2545 | .vcpu_free = kvmppc_core_vcpu_free_hv, |
| 2546 | .check_requests = kvmppc_core_check_requests_hv, |
| 2547 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, |
| 2548 | .flush_memslot = kvmppc_core_flush_memslot_hv, |
| 2549 | .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, |
| 2550 | .commit_memory_region = kvmppc_core_commit_memory_region_hv, |
| 2551 | .unmap_hva = kvm_unmap_hva_hv, |
| 2552 | .unmap_hva_range = kvm_unmap_hva_range_hv, |
| 2553 | .age_hva = kvm_age_hva_hv, |
| 2554 | .test_age_hva = kvm_test_age_hva_hv, |
| 2555 | .set_spte_hva = kvm_set_spte_hva_hv, |
| 2556 | .mmu_destroy = kvmppc_mmu_destroy_hv, |
| 2557 | .free_memslot = kvmppc_core_free_memslot_hv, |
| 2558 | .create_memslot = kvmppc_core_create_memslot_hv, |
| 2559 | .init_vm = kvmppc_core_init_vm_hv, |
| 2560 | .destroy_vm = kvmppc_core_destroy_vm_hv, |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2561 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, |
| 2562 | .emulate_op = kvmppc_core_emulate_op_hv, |
| 2563 | .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, |
| 2564 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, |
| 2565 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, |
| 2566 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, |
Paul Mackerras | ae2113a | 2014-06-02 11:03:00 +1000 | [diff] [blame] | 2567 | .hcall_implemented = kvmppc_hcall_impl_hv, |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2568 | }; |
| 2569 | |
| 2570 | static int kvmppc_book3s_init_hv(void) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2571 | { |
| 2572 | int r; |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2573 | /* |
| 2574 | * FIXME!! Do we need to check on all CPUs?
| 2575 | */ |
| 2576 | r = kvmppc_core_check_processor_compat_hv(); |
| 2577 | if (r < 0) |
Paul Mackerras | 739e242 | 2014-03-25 10:47:05 +1100 | [diff] [blame] | 2578 | return -ENODEV; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2579 | |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2580 | kvm_ops_hv.owner = THIS_MODULE; |
| 2581 | kvmppc_hv_ops = &kvm_ops_hv; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2582 | |
Paul Mackerras | 699a0ea | 2014-06-02 11:02:59 +1000 | [diff] [blame] | 2583 | init_default_hcalls(); |
| 2584 | |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2585 | r = kvmppc_mmu_hv_init(); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2586 | return r; |
| 2587 | } |
| 2588 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2589 | static void kvmppc_book3s_exit_hv(void) |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2590 | { |
Aneesh Kumar K.V | cbbc58d | 2013-10-07 22:18:01 +0530 | [diff] [blame] | 2591 | kvmppc_hv_ops = NULL; |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 2592 | } |
| 2593 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 2594 | module_init(kvmppc_book3s_init_hv); |
| 2595 | module_exit(kvmppc_book3s_exit_hv); |
Aneesh Kumar K.V | 2ba9f0d | 2013-10-07 22:17:59 +0530 | [diff] [blame] | 2596 | MODULE_LICENSE("GPL"); |
Alexander Graf | 398a76c | 2013-12-09 13:53:42 +0100 | [diff] [blame] | 2597 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
| 2598 | MODULE_ALIAS("devname:kvm"); |