/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

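/*
 * Core register IDs encode a u32 index into struct kvm_regs in their low
 * bits; masking off the architecture, size and coprocessor-group fields
 * recovers that index.
 */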
static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.regs;
	u64 off;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	return put_user(((u32 *)regs)[off], uaddr);
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.regs;
	u64 off, val;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	if (get_user(val, uaddr) != 0)
		return -EFAULT;

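	/*
	 * Writes to the CPSR slot are sanity checked: the new value must
	 * select one of the ordinary processor modes; anything else
	 * (including HYP, MON, SYSTEM and reserved encodings) is rejected.
	 */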
	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
		unsigned long mode = val & MODE_MASK;
		switch (mode) {
		case USR_MODE:
		case FIQ_MODE:
		case IRQ_MODE:
		case SVC_MODE:
		case ABT_MODE:
		case UND_MODE:
			break;
		default:
			return -EINVAL;
		}
	}

	((u32 *)regs)[off] = val;
	return 0;
}
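
/*
 * Userspace usage sketch (illustration only, not part of this file): reading
 * a core register through KVM_GET_ONE_REG. Assumes a vcpu fd obtained from
 * KVM_CREATE_VCPU; index 0 selects the first u32 slot of struct kvm_regs.
 *
 *	struct kvm_one_reg one_reg;
 *	__u32 val;
 *
 *	one_reg.id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | 0;
 *	one_reg.addr = (__u64)(unsigned long)&val;
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg))
 *		perror("KVM_GET_ONE_REG");
 *
 * Writing works the same way with KVM_SET_ONE_REG once val is filled in.
 */

/*
 * The whole-struct KVM_GET_REGS/KVM_SET_REGS ioctls are not supported on
 * ARM; the two stubs below return -EINVAL and userspace is expected to go
 * through the ONE_REG interface above instead.
 */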
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

#ifndef CONFIG_KVM_ARM_TIMER

#define NUM_TIMER_REGS 0

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	return 0;
}

static bool is_timer_reg(u64 index)
{
	return false;
}

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	return 0;
}

#else

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

#endif

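/*
 * set_timer_reg()/get_timer_reg() below adapt the architected timer state
 * to the ONE_REG interface: the 64-bit value is copied to/from the user
 * buffer and handed to kvm_arm_timer_set_reg()/kvm_arm_timer_get_reg().
 */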
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return ret;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
}

static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(u32);
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
		+ NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append coproc regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
	int ret;

	for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	ret = copy_timer_indices(vcpu, uindices);
	if (ret)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
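
/*
 * Userspace usage sketch (illustration only, not part of this file):
 * enumerating every register index with KVM_GET_REG_LIST. MAX_REGS is a
 * hypothetical bound; real callers typically probe with n = 0 first and
 * retry after an E2BIG result.
 *
 *	struct {
 *		struct kvm_reg_list list;
 *		__u64 reg[MAX_REGS];
 *	} reg_list = { .list.n = MAX_REGS };
 *
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &reg_list.list))
 *		perror("KVM_GET_REG_LIST");
 */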

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_coproc_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_coproc_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	if (implementor != ARM_CPU_IMP_ARM)
		return -EINVAL;

	switch (part_number) {
	case ARM_CPU_PART_CORTEX_A7:
		return KVM_ARM_TARGET_CORTEX_A7;
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			const struct kvm_vcpu_init *init)
{
	unsigned int i;

	/* We can only cope with guest==host and only on A15/A7 (for now). */
	if (init->target != kvm_target_cpu())
		return -EINVAL;

	vcpu->arch.target = init->target;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		if (test_bit(i, (void *)init->features)) {
			if (i >= KVM_VCPU_MAX_FEATURES)
				return -ENOENT;
			set_bit(i, vcpu->arch.features);
		}
	}

	/* Now we know what it is, we can reset it. */
	return kvm_reset_vcpu(vcpu);
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}
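
/*
 * Userspace usage sketch (illustration only, not part of this file): asking
 * the kernel for its preferred target and initialising a vcpu with it.
 * Assumes vm_fd and vcpu_fd were obtained via KVM_CREATE_VM/KVM_CREATE_VCPU.
 *
 *	struct kvm_vcpu_init init;
 *
 *	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
 *		perror("KVM_ARM_PREFERRED_TARGET");
 *	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))
 *		perror("KVM_ARM_VCPU_INIT");
 */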
336
Christoffer Dall749cf76c2013-01-20 18:28:06 -0500337int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
338{
339 return -EINVAL;
340}
341
342int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
343{
344 return -EINVAL;
345}
346
347int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
348 struct kvm_translation *tr)
349{
350 return -EINVAL;
351}