/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

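/*
 * Set/way data cache maintenance helpers: "dc cisw" cleans and invalidates
 * a line by set/way, "dc csw" only cleans it. The DSB makes sure the
 * operation has completed before we return.
 */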
static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb();
}

static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb();
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:		/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters.  Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct sys_reg_params *p,
		    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },
	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  NULL, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  NULL, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, FAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  pm_fake },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  pm_fake },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  NULL, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  pm_fake },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  pm_fake },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  pm_fake },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  pm_fake },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  pm_fake },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  pm_fake },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  pm_fake },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  pm_fake },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  pm_fake },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  pm_fake },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  pm_fake },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  pm_fake },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  pm_fake },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

/* Trapped cp15 registers */
static const struct sys_reg_desc cp15_regs[] = {
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

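/*
 * Emulate a trapped 32-bit CP15 access: look the instruction up in the
 * target-specific table first, then in the generic cp15_regs table, and
 * let the matching accessor handle it.
 */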
static void emulate_cp15(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, false, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return;
		}
		/* If access function fails, it should complain. */
	}

	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	emulate_cp15(vcpu, &params);

	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	emulate_cp15(vcpu, &params);
	return 1;
}

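/*
 * Emulate a trapped AArch64 MSR/MRS access: same two-level lookup as
 * emulate_cp15(), but against the 64-bit target table and the generic
 * sys_reg_descs table.
 */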
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

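/* Run the reset handler of every register in @table that has one. */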
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

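/*
 * Decode a KVM_GET/SET_ONE_REG index into Op0/Op1/CRn/CRm/Op2. Only 64-bit
 * sys_reg indices are accepted; any stray bits make the index invalid.
 */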
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							 u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

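/*
 * FUNCTION_INVARIANT(midr_el1) below expands to a get_midr_el1() helper
 * that does "mrs %0, midr_el1" and stores the host value in the
 * descriptor's ->val field; likewise for each register listed here.
 */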
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

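/*
 * A CSSELR value selects a cache: bit 0 picks instruction vs data, the
 * bits above it pick the level. Check it against the CLIDR-derived
 * cache_levels to see whether such a cache actually exists.
 */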
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

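/*
 * Count the CCSIDR values we expose to userspace as DEMUX register ids;
 * write_demux_regids() below emits the matching indices.
 */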
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

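/* Build the KVM_GET/SET_ONE_REG index for a sys_reg_desc entry. */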
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

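/*
 * One-time setup: sanity-check the ordering of sys_reg_descs, snapshot the
 * host values of the invariant registers, and derive cache_levels from
 * CLIDR_EL1.
 */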
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
		BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}