/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <asm/mmu_context.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

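/*
 * t0-t3 are $8-$11 under the o32 ABI, but $12-$15 under n32/n64 (where
 * $8-$11 serve as the extra argument registers a4-a7), hence the
 * ABI-dependent definitions below.
 */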
#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

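/*
 * Stack space reserved for the call frame when the generated code calls into
 * C (kvm_mips_handle_exit()).
 */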
#define CALLFRAME_SIZ	32

#ifdef CONFIG_64BIT
#define ST0_KX_IF_64	ST0_KX
#else
#define ST0_KX_IF_64	0
#endif

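/*
 * CP0 scratch registers used by the generated code, as { register, select }
 * pairs. The defaults below (DDATA_LO and ErrorEPC) are fallbacks;
 * kvm_mips_entry_setup() switches them to KScratch registers when the CPU
 * provides any.
 */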
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
static unsigned int scratch_tmp[2] = { C0_ERROREPC };

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
	label_exit_common,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)
UASM_L_LA(_exit_common)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/*
 * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
 * we assume symmetry.
 */
static int c0_kscratch(void)
{
	switch (boot_cpu_type()) {
	case CPU_XLP:
	case CPU_XLR:
		return 22;
	default:
		return 31;
	}
}

/**
 * kvm_mips_entry_setup() - Perform global setup for entry code.
 *
 * Perform global setup for entry code, such as choosing a scratch register.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
int kvm_mips_entry_setup(void)
{
	/*
	 * We prefer to use KScratchN registers if they are available over the
	 * defaults above, which may not work on all cores.
	 */
	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;

	/* Pick a scratch register for storing VCPU */
	if (kscratch_mask) {
		scratch_vcpu[0] = c0_kscratch();
		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_vcpu[1]);
	}

	/* Pick a scratch register to use as a temp for saving state */
	if (kscratch_mask) {
		scratch_tmp[0] = c0_kscratch();
		scratch_tmp[1] = ffs(kscratch_mask) - 1;
		kscratch_mask &= ~BIT(scratch_tmp[1]);
	}

	return 0;
}

static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
					unsigned int frame)
{
	/* Save the VCPU scratch register value in cp0_epc of the stack frame */
	UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);

	/* Save the temp scratch register value in cp0_cause of stack frame */
	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
	}
}

static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
					   unsigned int frame)
{
	/*
	 * Restore host scratch register values saved by
	 * kvm_mips_build_save_scratch().
	 */
	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);

	if (scratch_tmp[0] == c0_kscratch()) {
		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
	}
}

/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
	if (cpu_has_ebase_wg) {
		/* Set WG so that all the bits get written */
		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
		UASM_i_MTC0(p, reg, C0_EBASE);
	} else {
		uasm_i_mtc0(p, reg, C0_EBASE);
	}
}

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
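	/*
	 * Save the callee-saved registers ($16-$23 and $28-$31: s0-s7, gp,
	 * sp, fp/s8, ra) into the pt_regs frame; $24-$27 (t8, t9, k0, k1)
	 * are caller-clobbered or kernel-reserved and need not be preserved.
	 */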
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save scratch registers, will be used to store pointer to vcpu etc */
	kvm_mips_build_save_scratch(&p, V1, K1);

	/* VCPU scratch register has pointer to vcpu */
	UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Offset into vcpu->arch */
	UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	build_set_exc_base(&p, K0);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
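	/*
	 * T0 is now zero iff the guest was in user mode (KSU == user,
	 * EXL == ERL == 0). The kernel ASID pointer load below is emitted
	 * straight after the bnez and so executes in its delay slot; in the
	 * user case we fall through and overwrite T1 with the user ASID
	 * pointer instead.
	 */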
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_kernel_mm.context.asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
					  guest_user_mm.context.asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* index the ASID array */
	uasm_i_sll(&p, T2, T2, ilog2(sizeof(long)));
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/*
	 * reuse ASID array offset
	 * cpuinfo_mips is a multiple of sizeof(long)
	 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long));
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

	/*
	 * Set up KVM T&E GVA pgd.
	 * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
	 * - call tlbmiss_handler_setup_pgd(mm->pgd)
	 * - but skips write into CP0_PWBase for now
	 */
	UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
			  (int)offsetof(struct mm_struct, context.asid), T1);

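	/*
	 * Call tlbmiss_handler_setup_pgd(); the ENTRYHI (guest ASID) write
	 * emitted after the jalr executes in its delay slot.
	 */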
	UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
	uasm_i_jalr(&p, RA, T9);
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);

	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * branch to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

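	/*
	 * Set BEV first so that, while EBase is still pointing at the guest
	 * exception vectors, any exception taken during the switch goes via
	 * the bootstrap vectors instead.
	 */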
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
#ifdef CONFIG_64BIT
	uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX);
#endif
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
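	/*
	 * The CALLFRAME_SIZ stack adjustment below is emitted after the jalr
	 * and therefore executes in its delay slot, before the C handler runs.
	 */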
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the scratch register */
	UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	build_set_exc_base(&p, T0);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}