/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *dest;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
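		/*
		 * The guest did an MMIO load: copy the data that was made
		 * available in run->mmio.data back into the destination
		 * register recorded by decode_hsr().
		 */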
		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
		*dest = 0;

		len = run->mmio.len;
		if (len > sizeof(unsigned long))
			return -EINVAL;

		memcpy(dest, run->mmio.data, len);

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       *((u64 *)run->mmio.data));

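		/*
		 * Sign-extend narrow loads using the XOR/subtract idiom:
		 * flipping the sign bit and then subtracting its weight turns
		 * a zero-extended value into the sign-extended one.  For
		 * example, a 1-byte load of 0xff gives mask = 0x80, and
		 * (0xff ^ 0x80) - 0x80 = -1, i.e. 0xffffffff in the register.
		 */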
		if (vcpu->arch.mmio_decode.sign_extend &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			*dest = (*dest ^ mask) - mask;
		}
	}

	return 0;
}

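/*
 * For reference, the userspace half of the protocol above looks roughly
 * like the following (an illustrative sketch only; handle_read() stands in
 * for whatever device emulation the VMM implements, the remaining names
 * come from the KVM userspace API in <linux/kvm.h>):
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write) {
 *		handle_read(run->mmio.phys_addr, run->mmio.data,
 *			    run->mmio.len);
 *		ioctl(vcpu_fd, KVM_RUN, 0);	// re-enter the guest
 *	}
 *
 * On the next KVM_RUN, kvm_handle_mmio_return() above picks the data up
 * from run->mmio.data and completes the load into the guest register.
 */
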
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		      struct kvm_exit_mmio *mmio)
{
	unsigned long rt;
	int len;
	bool is_write, sign_extend;

	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

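	/*
	 * The remaining ISS fields tell us everything needed to replay the
	 * access: the access size in bytes (or a negative error code if it
	 * cannot be decoded), whether narrow loads must be sign-extended,
	 * whether it is a read or a write, and the register involved.
	 */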
	len = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(len < 0))
		return len;

	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	mmio->is_write = is_write;
	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	struct kvm_exit_mmio mmio;
	unsigned long rt;
	int ret;

	/*
	 * Prepare MMIO operation. First stash it in a private
	 * structure that we can use for in-kernel emulation. If the
	 * kernel can't handle it, copy it into run->mmio and let user
	 * space do its magic.
	 */

	if (kvm_vcpu_dabt_isvalid(vcpu)) {
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;
	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
					 KVM_TRACE_MMIO_READ_UNSATISFIED,
			mmio.len, fault_ipa,
			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);

	if (mmio.is_write)
		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);

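	/*
	 * Give the in-kernel emulation (currently only the virtual GIC) a
	 * chance to handle the access; if it claims it, there is no need to
	 * exit to userspace and the guest can be resumed right away.
	 */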
	if (vgic_handle_mmio(vcpu, run, &mmio))
		return 1;

	kvm_prepare_mmio(run, &mmio);
	return 0;
}