/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * FPU context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */
10
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>

	/*
	 * noreorder: delay slots below are filled by hand, the assembler
	 * must not reschedule around branches.
	 * noat: do not let the assembler silently use $at as a scratch reg.
	 */
	.set	noreorder
	.set	noat
19
/*
 * void __kvm_save_fpu(a0)
 *
 * Save the full hardware FPU register file into the context that a0
 * points at (laid out per the VCPU_FPRn asm-offsets).
 *
 * If Status.FR is clear the FPU only exposes the 16 even-numbered
 * doubles, so the odd-numbered saves are skipped in that case.
 * Clobbers: t0, flags only via CP0 read. Delay slots are explicit
 * (file is assembled with .set noreorder).
 */
LEAF(__kvm_save_fpu)
	.set	push
	.set	mips64r2
	SET_HARDFLOAT
	mfc0	t0, CP0_STATUS
	sll	t0, t0, 5		# shift Status.FR up into the sign bit
	bgez	t0, 1f			# FR == 0 -> no odd doubles, skip them
	 nop
	/* Status.FR set: 32 double registers, store the odd half first */
	sdc1	$f1, VCPU_FPR1(a0)
	sdc1	$f3, VCPU_FPR3(a0)
	sdc1	$f5, VCPU_FPR5(a0)
	sdc1	$f7, VCPU_FPR7(a0)
	sdc1	$f9, VCPU_FPR9(a0)
	sdc1	$f11, VCPU_FPR11(a0)
	sdc1	$f13, VCPU_FPR13(a0)
	sdc1	$f15, VCPU_FPR15(a0)
	sdc1	$f17, VCPU_FPR17(a0)
	sdc1	$f19, VCPU_FPR19(a0)
	sdc1	$f21, VCPU_FPR21(a0)
	sdc1	$f23, VCPU_FPR23(a0)
	sdc1	$f25, VCPU_FPR25(a0)
	sdc1	$f27, VCPU_FPR27(a0)
	sdc1	$f29, VCPU_FPR29(a0)
	sdc1	$f31, VCPU_FPR31(a0)

	/* Even-numbered doubles exist in both FR modes */
1:	sdc1	$f0, VCPU_FPR0(a0)
	sdc1	$f2, VCPU_FPR2(a0)
	sdc1	$f4, VCPU_FPR4(a0)
	sdc1	$f6, VCPU_FPR6(a0)
	sdc1	$f8, VCPU_FPR8(a0)
	sdc1	$f10, VCPU_FPR10(a0)
	sdc1	$f12, VCPU_FPR12(a0)
	sdc1	$f14, VCPU_FPR14(a0)
	sdc1	$f16, VCPU_FPR16(a0)
	sdc1	$f18, VCPU_FPR18(a0)
	sdc1	$f20, VCPU_FPR20(a0)
	sdc1	$f22, VCPU_FPR22(a0)
	sdc1	$f24, VCPU_FPR24(a0)
	sdc1	$f26, VCPU_FPR26(a0)
	sdc1	$f28, VCPU_FPR28(a0)
	jr	ra
	 sdc1	$f30, VCPU_FPR30(a0)	# final store rides the jr delay slot
	.set	pop
	END(__kvm_save_fpu)
63
/*
 * void __kvm_restore_fpu(a0)
 *
 * Mirror of __kvm_save_fpu: reload the hardware FPU register file from
 * the context a0 points at (VCPU_FPRn asm-offsets layout).
 *
 * Odd-numbered doubles are only loaded when Status.FR is set; with FR
 * clear the hardware exposes just the 16 even doubles.
 * Clobbers: t0. Delay slots are explicit (.set noreorder file).
 */
LEAF(__kvm_restore_fpu)
	.set	push
	.set	mips64r2
	SET_HARDFLOAT
	mfc0	t0, CP0_STATUS
	sll	t0, t0, 5		# shift Status.FR up into the sign bit
	bgez	t0, 1f			# FR == 0 -> no odd doubles, skip them
	 nop
	/* Status.FR set: 32 double registers, load the odd half first */
	ldc1	$f1, VCPU_FPR1(a0)
	ldc1	$f3, VCPU_FPR3(a0)
	ldc1	$f5, VCPU_FPR5(a0)
	ldc1	$f7, VCPU_FPR7(a0)
	ldc1	$f9, VCPU_FPR9(a0)
	ldc1	$f11, VCPU_FPR11(a0)
	ldc1	$f13, VCPU_FPR13(a0)
	ldc1	$f15, VCPU_FPR15(a0)
	ldc1	$f17, VCPU_FPR17(a0)
	ldc1	$f19, VCPU_FPR19(a0)
	ldc1	$f21, VCPU_FPR21(a0)
	ldc1	$f23, VCPU_FPR23(a0)
	ldc1	$f25, VCPU_FPR25(a0)
	ldc1	$f27, VCPU_FPR27(a0)
	ldc1	$f29, VCPU_FPR29(a0)
	ldc1	$f31, VCPU_FPR31(a0)

	/* Even-numbered doubles exist in both FR modes */
1:	ldc1	$f0, VCPU_FPR0(a0)
	ldc1	$f2, VCPU_FPR2(a0)
	ldc1	$f4, VCPU_FPR4(a0)
	ldc1	$f6, VCPU_FPR6(a0)
	ldc1	$f8, VCPU_FPR8(a0)
	ldc1	$f10, VCPU_FPR10(a0)
	ldc1	$f12, VCPU_FPR12(a0)
	ldc1	$f14, VCPU_FPR14(a0)
	ldc1	$f16, VCPU_FPR16(a0)
	ldc1	$f18, VCPU_FPR18(a0)
	ldc1	$f20, VCPU_FPR20(a0)
	ldc1	$f22, VCPU_FPR22(a0)
	ldc1	$f24, VCPU_FPR24(a0)
	ldc1	$f26, VCPU_FPR26(a0)
	ldc1	$f28, VCPU_FPR28(a0)
	jr	ra
	 ldc1	$f30, VCPU_FPR30(a0)	# final load rides the jr delay slot
	.set	pop
	END(__kvm_restore_fpu)
107
/*
 * void __kvm_restore_fcsr(a0)
 *
 * Restore the guest's FP control/status register from VCPU_FCR31(a0).
 * Clobbers: t0.
 */
LEAF(__kvm_restore_fcsr)
	.set	push
	SET_HARDFLOAT
	lw	t0, VCPU_FCR31(a0)
	/*
	 * LAYOUT CONSTRAINT: the ctc1 below must remain at exactly this
	 * offset within __kvm_restore_fcsr. kvm_mips_csr_die_notify()
	 * recognises a fault at this address: writing t0 may raise an FP
	 * exception (guest Cause bits deliberately left set), which has
	 * to be stepped over and ignored so the bits stay armed for the
	 * guest. Do not insert or reorder instructions before the ctc1.
	 */
	ctc1	t0, fcr31
	jr	ra
	 nop				# jr delay slot
	.set	pop
	END(__kvm_restore_fcsr)