/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ	32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

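/*
 * CP0 registers used by default to hold the VCPU pointer and a temporary
 * value; kvm_mips_entry_setup() switches these to KScratch registers when
 * the core provides them.
 */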
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
	switch (boot_cpu_type()) {
	case CPU_XLP:
	case CPU_XLR:
		return 22;
	default:
		return 31;
	}
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers over the defaults above when
	 * they are available, as the defaults may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	if (pgd_reg != -1)
		kscratch_mask &= ~BIT(pgd_reg);

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 are not used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
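	/*
	 * Save the callee-saved registers s0-s7 plus gp, sp, s8/fp and ra;
	 * t8/t9 and k0/k1 are skipped by jumping from 24 to 28.
	 */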
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, which will be used to store the vcpu pointer etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Set up the status register for running the guest in UM; interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
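	/*
	 * T0 is now zero iff the guest was in user mode (KSU=user with
	 * ERL/EXL clear). The kernel-mode ADDIU below executes in the bnez
	 * delay slot; on fall-through the user-mode ADDIU overwrites T1.
	 */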
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_kernel_mm.context.asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_user_mm.context.asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

	/*
	 * Set up KVM T&E GVA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - but skips write into CP0_PWBase for now
	 */
	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
			  (int)offsetof(struct mm_struct, context.asid), T1);

	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
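	/* Delay slot: write the guest ASID computed above into EntryHi */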
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);

	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble TLB refill exception fast path handler for guest execution.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);

	/*
	 * Some of the common tlbex code uses current_cpu_type(). For KVM we
	 * assume symmetry and just disable preemption to silence the warning.
	 */
	preempt_disable();

	/*
	 * Now for the actual refill bit. A lot of this can be common with the
	 * Linux TLB refill handler, however we don't need to handle so many
	 * cases. We only need to handle user mode refills, and user mode runs
	 * with 32-bit addressing.
	 *
	 * Therefore the branch to label_vmalloc generated by build_get_pmde64()
	 * that isn't resolved should never actually get taken and is harmless
	 * to leave in place for now.
	 */

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	/* we don't support huge pages yet */

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);

	preempt_enable();

	/* Get the VCPU pointer from the VCPU scratch register again */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1);
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Jump to guest */
	uasm_i_eret(&p);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	if (cpu_has_badinstr) {
		uasm_i_mfc0(&p, K0, C0_BADINSTR);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstr), K1);
	}

	if (cpu_has_badinstrp) {
		uasm_i_mfc0(&p, K0, C0_BADINSTRP);
		uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch,
					   host_cp0_badinstrp), K1);
	}

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
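	/* Delay slot: reserve the ABI call frame for the C handler */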
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}
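	/* RA (regs[31]) is reloaded separately below, just before the jr */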

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}