/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>
#include "trace.h"
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
*
* This should only be called after returning from userspace for MMIO load
* emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
__u32 *dest;
unsigned int len;
int mask;
if (!run->mmio.is_write) {
dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
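		/*
		 * Zero the whole 32-bit destination register up front so
		 * that a 1- or 2-byte load only fills the low-order bytes
		 * and leaves no stale data above them.
		 */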
memset(dest, 0, sizeof(int));
len = run->mmio.len;
if (len > 4)
return -EINVAL;
memcpy(dest, run->mmio.data, len);
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
*((u64 *)run->mmio.data));
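		/*
		 * Sign-extend narrow loads in place: XOR-ing with the sign
		 * bit and then subtracting it propagates the sign into the
		 * upper bytes when it is set, and is a no-op otherwise.
		 */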
if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
mask = 1U << ((len * 8) - 1);
*dest = (*dest ^ mask) - mask;
}
}
return 0;
}

static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
unsigned long rt, len;
bool is_write, sign_extend;
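	/*
	 * Decode the data abort ISS fields of the HSR: CM (bit 8) flags a
	 * cache maintenance operation, S1PTW (bit 7) a stage 1 page table
	 * walk, SAS (bits 23:22) the access size, and WnR, SSE and SRT the
	 * direction, sign extension and destination/source register.
	 */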
if ((vcpu->arch.hsr >> 8) & 1) {
/* cache operation on I/O addr, tell guest unsupported */
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
if ((vcpu->arch.hsr >> 7) & 1) {
/* page table accesses IO mem: tell guest to fix its TTBR */
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
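	/* SAS encodes the access size as 2^SAS bytes; 0b11 is reserved. */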
switch ((vcpu->arch.hsr >> 22) & 0x3) {
case 0:
len = 1;
break;
case 1:
len = 2;
break;
case 2:
len = 4;
break;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
return -EFAULT;
}
is_write = vcpu->arch.hsr & HSR_WNR;
sign_extend = vcpu->arch.hsr & HSR_SSE;
rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
/* IO memory trying to read/write pc */
kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
return 1;
}
mmio->is_write = is_write;
mmio->phys_addr = fault_ipa;
mmio->len = len;
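	/*
	 * Stash the register number and sign-extension flag; they are
	 * needed by kvm_handle_mmio_return() once user space has
	 * completed the access.
	 */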
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
/*
* The MMIO instruction is emulated and should not be re-executed
* in the guest.
*/
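	/*
	 * HSR[25] is the IL bit: it tells kvm_skip_instr() whether the
	 * trapped instruction was a 32-bit or a 16-bit (Thumb) encoding,
	 * so the PC is advanced by the right amount.
	 */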
kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa)
{
struct kvm_exit_mmio mmio;
unsigned long rt;
int ret;
/*
* Prepare MMIO operation. First stash it in a private
* structure that we can use for in-kernel emulation. If the
* kernel can't handle it, copy it into run->mmio and let user
* space do its magic.
*/
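	/*
	 * HSR_ISV indicates that the HSR carries a valid instruction
	 * syndrome for this fault.  Without it we would have to fetch and
	 * decode the faulting load/store from guest memory, which is not
	 * implemented.
	 */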
if (vcpu->arch.hsr & HSR_ISV) {
ret = decode_hsr(vcpu, fault_ipa, &mmio);
if (ret)
return ret;
} else {
kvm_err("load/store instruction decoding not implemented\n");
return -ENOSYS;
}
rt = vcpu->arch.mmio_decode.rt;
trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
KVM_TRACE_MMIO_READ_UNSATISFIED,
mmio.len, fault_ipa,
(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);
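	/*
	 * For a write the register value is captured into mmio.data now;
	 * for a read the data comes back via kvm_handle_mmio_return()
	 * once the access has been satisfied.
	 */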
if (mmio.is_write)
memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
kvm_prepare_mmio(run, &mmio);
return 0;
}