/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *dest;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
		*dest = 0;

		len = run->mmio.len;
		if (len > sizeof(unsigned long))
			return -EINVAL;

		memcpy(dest, run->mmio.data, len);

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       *((u64 *)run->mmio.data));

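		/*
		 * Sign-extending loads narrower than a register (e.g. a
		 * trapped LDRSB/LDRSH) need the sign bit replicated into
		 * the upper bits: XORing with the sign-bit mask and then
		 * subtracting the mask is a branch-free two's-complement
		 * sign extension.
		 */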
		if (vcpu->arch.mmio_decode.sign_extend &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			*dest = (*dest ^ mask) - mask;
		}
	}

	return 0;
}

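/*
 * decode_hsr - decode a trapped load/store from the HSR instruction syndrome
 *
 * When the syndrome is valid (ISV set), the hardware already tells us the
 * access size, whether the load sign-extends, the transfer register and the
 * access direction, so no guest instruction fetch or decode is needed.  The
 * parameters needed to complete the access later are stashed in
 * vcpu->arch.mmio_decode; the rest is copied into *mmio for the emulation
 * itself.
 */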
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		      struct kvm_exit_mmio *mmio)
{
	unsigned long rt;
	bool is_write, sign_extend;
	int len;	/* signed: the access size decode below can fail */

	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

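	/*
	 * The syndrome encodes the access size (byte, halfword or word);
	 * a negative value means the hardware reported a reserved
	 * encoding that we cannot emulate, so propagate the error.
	 */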
	len = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(len < 0))
		return len;

	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	mmio->is_write = is_write;
	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	struct kvm_exit_mmio mmio;
	unsigned long rt;
	int ret;

	/*
	 * Prepare MMIO operation. First stash it in a private
	 * structure that we can use for in-kernel emulation. If the
	 * kernel can't handle it, copy it into run->mmio and let user
	 * space do its magic.
	 */

	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;
	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
					 KVM_TRACE_MMIO_READ_UNSATISFIED,
			mmio.len, fault_ipa,
			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);

	if (mmio.is_write)
		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);

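	/*
	 * Let the in-kernel emulated VGIC claim the access first; only
	 * accesses it does not handle are copied into run->mmio and
	 * forwarded to user space.
	 */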
	if (vgic_handle_mmio(vcpu, run, &mmio))
		return 1;

	kvm_prepare_mmio(run, &mmio);
	return 0;
}
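
/*
 * For reference, a sketch of the user space side of this protocol (not part
 * of this file): on a KVM_EXIT_MMIO exit the VMM performs the access
 * described by run->mmio and, for a read, places the result in
 * run->mmio.data before the next KVM_RUN, at which point
 * kvm_handle_mmio_return() above loads it into the guest register:
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			device_write(run->mmio.phys_addr,
 *				     run->mmio.data, run->mmio.len);
 *		else
 *			device_read(run->mmio.phys_addr,
 *				    run->mmio.data, run->mmio.len);
 *	}
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * device_read(), device_write() and vcpu_fd are hypothetical VMM helpers.
 */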