/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 *
 * Author: Varun Sethi <varun.sethi@freescale.com>
 * Author: Scott Wood <scotwood@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu-44x.h>
#include <asm/page.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>

#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */

#define GET_VCPU(vcpu, thread)	\
	PPC_LL	vcpu, THREAD_KVM_VCPU(thread)

#define SET_VCPU(vcpu)	\
	PPC_STL	vcpu, (THREAD + THREAD_KVM_VCPU)(r2)
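/*
 * GET_VCPU loads the vcpu pointer that the host stashed in
 * thread_struct (thread.kvm_vcpu); SET_VCPU would store it there
 * via r2 ('current').  Note that SET_VCPU is currently unused --
 * its only call site, in the lightweight_exit path below, is
 * commented out.
 */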

#define LONGBYTES		(BITS_PER_LONG / 8)

#define VCPU_GPR(n)		(VCPU_GPRS + (n * LONGBYTES))
#define VCPU_GUEST_SPRG(n)	(VCPU_GUEST_SPRGS + (n * LONGBYTES))
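/*
 * Example: ppc_asm.h defines the bare register names as plain
 * numbers (r14 == 14), so VCPU_GPR(r14) expands to
 * (VCPU_GPRS + 14 * LONGBYTES), the byte offset of the guest's
 * r14 save slot within the vcpu struct.
 */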

/* The host stack layout: */
#define HOST_R1		(0 * LONGBYTES) /* Implied by stwu. */
#define HOST_CALLEE_LR	(1 * LONGBYTES)
#define HOST_RUN	(2 * LONGBYTES) /* struct kvm_run */
/*
 * r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option.
 */
#define HOST_R2		(3 * LONGBYTES)
#define HOST_NV_GPRS	(4 * LONGBYTES)
#define HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * LONGBYTES))
#define HOST_MIN_STACK_SIZE	(HOST_NV_GPR(31) + LONGBYTES)
#define HOST_STACK_SIZE	((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
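/*
 * Worked example (32-bit, LONGBYTES = 4): HOST_NV_GPR(31) is
 * 4*4 + (31-14)*4 = 84, so HOST_MIN_STACK_SIZE is 88 and
 * HOST_STACK_SIZE rounds up to 96 (16-byte aligned).  On 64-bit
 * the same formulas give 168, 176, and 176.
 */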

#define NEED_EMU	0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR	0x00000002 /* save faulting DEAR */
#define NEED_ESR	0x00000004 /* save faulting ESR */
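/*
 * Handlers below OR these together as needed; e.g. the DTLB miss
 * handler passes (NEED_EMU | NEED_DEAR | NEED_ESR) so that both
 * instruction emulation and fault reporting have the state they
 * require.
 */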

/*
 * On entry:
 * r4 = vcpu, r5 = srr0, r6 = srr1
 * saved in vcpu: cr, ctr, r3-r13
 */
.macro kvm_handler_common intno, srr0, flags
	mfspr	r10, SPRN_PID
	lwz	r8, VCPU_HOST_PID(r4)
	PPC_LL	r11, VCPU_SHARED(r4)
	PPC_STL	r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
	li	r14, \intno

	stw	r10, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r8

	.if	\flags & NEED_EMU
	lwz	r9, VCPU_KVM(r4)
	.endif

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
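	/*
	 * Read TBU twice around TBL and retry if the upper half
	 * changed, so we record a consistent 64-bit timebase even
	 * if TBL rolled over between the reads.
	 */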
1:	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	PPC_STL	r8, VCPU_TIMING_EXIT_TBL(r4)
	bne-	1b
	PPC_STL	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	oris	r8, r6, MSR_CE@h
#ifndef CONFIG_64BIT
	stw	r6, (VCPU_SHARED_MSR + 4)(r11)
#else
	std	r6, (VCPU_SHARED_MSR)(r11)
#endif
	ori	r8, r8, MSR_ME | MSR_RI
	PPC_STL	r5, VCPU_PC(r4)

	/*
	 * Make sure CE/ME/RI are set (if appropriate for the exception
	 * type) whether or not the guest had them set.  Since mfmsr/mtmsr
	 * are somewhat expensive, skip in the common case where the guest
	 * had all these bits set (and thus they're still set if
	 * appropriate for the exception type).
	 */
	cmpw	r6, r8
	.if	\flags & NEED_EMU
	lwz	r9, KVM_LPID(r9)
	.endif
	beq	1f
	mfmsr	r7
	.if	\srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
	oris	r7, r7, MSR_CE@h
	.endif
	.if	\srr0 != SPRN_MCSRR0
	ori	r7, r7, MSR_ME | MSR_RI
	.endif
	mtmsr	r7
1:

	.if	\flags & NEED_EMU
	/*
	 * This assumes you have external PID support.
	 * To support a bookehv CPU without external PID, you'll
	 * need to look up the TLB entry and create a temporary mapping.
	 *
	 * FIXME: we don't currently handle the case where lwepx faults.
	 * PR-mode booke doesn't handle it either.  Since Linux doesn't
	 * use broadcast tlbivax anymore, the only way this should happen
	 * is if the guest maps its memory execute-but-not-read, or if we
	 * somehow take a TLB miss in the middle of this entry code and
	 * evict the relevant entry.  On e500mc, all kernel lowmem is
	 * bolted into TLB1 large page mappings, and we don't use
	 * broadcast invalidates, so we should not take a TLB miss here.
	 *
	 * Later we'll need to deal with faults here.  Disallowing guest
	 * mappings that are execute-but-not-read could be an option on
	 * e500mc, but not on chips with an LRAT if it is used.
	 */

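	/*
	 * Build an EPLC value describing the guest context at the time
	 * of the trap: address space (EAS) from MSR[IR], privilege
	 * (EPR) from MSR[PR], and the guest PID still in r10 from
	 * above, then fetch the trapping instruction with lwepx.
	 */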
	mfspr	r3, SPRN_EPLC	/* will already have correct ELPID and EGS */
	PPC_STL	r15, VCPU_GPR(r15)(r4)
	PPC_STL	r16, VCPU_GPR(r16)(r4)
	PPC_STL	r17, VCPU_GPR(r17)(r4)
	PPC_STL	r18, VCPU_GPR(r18)(r4)
	PPC_STL	r19, VCPU_GPR(r19)(r4)
	mr	r8, r3
	PPC_STL	r20, VCPU_GPR(r20)(r4)
	rlwimi	r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
	PPC_STL	r21, VCPU_GPR(r21)(r4)
	rlwimi	r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
	PPC_STL	r22, VCPU_GPR(r22)(r4)
	rlwimi	r8, r10, EPC_EPID_SHIFT, EPC_EPID
	PPC_STL	r23, VCPU_GPR(r23)(r4)
	PPC_STL	r24, VCPU_GPR(r24)(r4)
	PPC_STL	r25, VCPU_GPR(r25)(r4)
	PPC_STL	r26, VCPU_GPR(r26)(r4)
	PPC_STL	r27, VCPU_GPR(r27)(r4)
	PPC_STL	r28, VCPU_GPR(r28)(r4)
	PPC_STL	r29, VCPU_GPR(r29)(r4)
	PPC_STL	r30, VCPU_GPR(r30)(r4)
	PPC_STL	r31, VCPU_GPR(r31)(r4)
	mtspr	SPRN_EPLC, r8
	isync
	lwepx	r9, 0, r5
	mtspr	SPRN_EPLC, r3
	stw	r9, VCPU_LAST_INST(r4)
	.endif

	.if	\flags & NEED_ESR
	mfspr	r8, SPRN_ESR
	PPC_STL	r8, VCPU_FAULT_ESR(r4)
	.endif

	.if	\flags & NEED_DEAR
	mfspr	r9, SPRN_DEAR
	PPC_STL	r9, VCPU_FAULT_DEAR(r4)
	.endif

	b	kvmppc_resume_host
.endm

/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
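/*
 * In brief (see that header for the authoritative list), as set up
 * by the exception prologue in head_booke.h: r10 = thread struct,
 * guest r10 in SPRN_SPRG_RSCRATCH0, guest r11/r13 in
 * THREAD_NORMSAVE(0)/(2), and the guest CR in r13.
 */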
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	GET_VCPU(r11, r10)
	PPC_STL	r3, VCPU_GPR(r3)(r11)
	mfspr	r3, SPRN_SPRG_RSCRATCH0
	PPC_STL	r4, VCPU_GPR(r4)(r11)
	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
	PPC_STL	r5, VCPU_GPR(r5)(r11)
	PPC_STL	r13, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(r10)(r11)
	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
	PPC_STL	r6, VCPU_GPR(r6)(r11)
	PPC_STL	r4, VCPU_GPR(r11)(r11)
	mfspr	r6, \srr1
	PPC_STL	r7, VCPU_GPR(r7)(r11)
	PPC_STL	r8, VCPU_GPR(r8)(r11)
	PPC_STL	r9, VCPU_GPR(r9)(r11)
	PPC_STL	r3, VCPU_GPR(r13)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(r12)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm

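/*
 * Crit/debug-level variant: here r8 points at the exception-level
 * save area holding guest r9-r11 (at GPR9/GPR10/GPR11), the guest
 * CR is in r9, and the guest r8 is in the \scratch SPRG.
 */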
.macro kvm_lvl_handler intno scratch srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	mfspr	r10, SPRN_SPRG_THREAD
	GET_VCPU(r11, r10)
	PPC_STL	r3, VCPU_GPR(r3)(r11)
	mfspr	r3, \scratch
	PPC_STL	r4, VCPU_GPR(r4)(r11)
	PPC_LL	r4, GPR9(r8)
	PPC_STL	r5, VCPU_GPR(r5)(r11)
	PPC_STL	r9, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(r8)(r11)
	PPC_LL	r3, GPR10(r8)
	PPC_STL	r6, VCPU_GPR(r6)(r11)
	PPC_STL	r4, VCPU_GPR(r9)(r11)
	mfspr	r6, \srr1
	PPC_LL	r4, GPR11(r8)
	PPC_STL	r7, VCPU_GPR(r7)(r11)
	/*
	 * Guest r8 was already saved above from \scratch, so don't
	 * clobber it with the host's r8 here; guest r13 is still live
	 * in r13 and must be saved to satisfy the kvm_handler_common
	 * contract (cr, ctr, r3-r13).
	 */
	PPC_STL	r3, VCPU_GPR(r10)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(r12)(r11)
	PPC_STL	r13, VCPU_GPR(r13)(r11)
	PPC_STL	r4, VCPU_GPR(r11)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm

kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
	SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
	SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0


/* Registers:
 *  SPRG_SCRATCH0: guest r10
 *  r4: vcpu pointer
 *  r11: vcpu->arch.shared
 *  r14: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	/* Save remaining volatile guest register state to vcpu. */
	mfspr	r3, SPRN_VRSAVE
	PPC_STL	r0, VCPU_GPR(r0)(r4)
	PPC_STL	r1, VCPU_GPR(r1)(r4)
	mflr	r5
	mfspr	r6, SPRN_SPRG4
	PPC_STL	r2, VCPU_GPR(r2)(r4)
	PPC_STL	r5, VCPU_LR(r4)
	mfspr	r7, SPRN_SPRG5
	PPC_STL	r3, VCPU_VRSAVE(r4)
	PPC_STL	r6, VCPU_SHARED_SPRG4(r11)
	mfspr	r8, SPRN_SPRG6
	PPC_STL	r7, VCPU_SHARED_SPRG5(r11)
	mfspr	r9, SPRN_SPRG7
	PPC_STL	r8, VCPU_SHARED_SPRG6(r11)
	mfxer	r3
	PPC_STL	r9, VCPU_SHARED_SPRG7(r11)

	/* save guest MAS registers and restore host mas4 & mas6 */
	mfspr	r5, SPRN_MAS0
	PPC_STL	r3, VCPU_XER(r4)
	mfspr	r6, SPRN_MAS1
	stw	r5, VCPU_SHARED_MAS0(r11)
	mfspr	r7, SPRN_MAS2
	stw	r6, VCPU_SHARED_MAS1(r11)
#ifndef CONFIG_64BIT
	stw	r7, (VCPU_SHARED_MAS2 + 4)(r11)
#else
	std	r7, (VCPU_SHARED_MAS2)(r11)
#endif
	mfspr	r5, SPRN_MAS3
	mfspr	r6, SPRN_MAS4
	stw	r5, VCPU_SHARED_MAS7_3+4(r11)
	mfspr	r7, SPRN_MAS6
	stw	r6, VCPU_SHARED_MAS4(r11)
	mfspr	r5, SPRN_MAS7
	lwz	r6, VCPU_HOST_MAS4(r4)
	stw	r7, VCPU_SHARED_MAS6(r11)
	lwz	r8, VCPU_HOST_MAS6(r4)
	mtspr	SPRN_MAS4, r6
	stw	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r8
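	/*
	 * Clear EPCR[DMIUH] so that TLB miss interrupts taken in the
	 * host update the MAS registers again; the guest-entry path
	 * below sets it to keep hardware from clobbering the guest's
	 * MAS state.
	 */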
	mfspr	r3, SPRN_EPCR
	rlwinm	r3, r3, 0, ~SPRN_EPCR_DMIUH
	mtspr	SPRN_EPCR, r3
	isync

	/* Restore host stack pointer */
	PPC_LL	r1, VCPU_HOST_STACK(r4)
	PPC_LL	r2, HOST_R2(r1)

	/* Switch to kernel stack and jump to handler. */
	PPC_LL	r3, HOST_RUN(r1)
	mr	r5, r14 /* intno */
	mr	r14, r4 /* Save vcpu pointer. */
	bl	kvmppc_handle_exit

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	PPC_LL	r14, VCPU_GPR(r14)(r4)

	andi.	r5, r3, RESUME_FLAG_NV
	beq	skip_nv_load
	PPC_LL	r15, VCPU_GPR(r15)(r4)
	PPC_LL	r16, VCPU_GPR(r16)(r4)
	PPC_LL	r17, VCPU_GPR(r17)(r4)
	PPC_LL	r18, VCPU_GPR(r18)(r4)
	PPC_LL	r19, VCPU_GPR(r19)(r4)
	PPC_LL	r20, VCPU_GPR(r20)(r4)
	PPC_LL	r21, VCPU_GPR(r21)(r4)
	PPC_LL	r22, VCPU_GPR(r22)(r4)
	PPC_LL	r23, VCPU_GPR(r23)(r4)
	PPC_LL	r24, VCPU_GPR(r24)(r4)
	PPC_LL	r25, VCPU_GPR(r25)(r4)
	PPC_LL	r26, VCPU_GPR(r26)(r4)
	PPC_LL	r27, VCPU_GPR(r27)(r4)
	PPC_LL	r28, VCPU_GPR(r28)(r4)
	PPC_LL	r29, VCPU_GPR(r29)(r4)
	PPC_LL	r30, VCPU_GPR(r30)(r4)
	PPC_LL	r31, VCPU_GPR(r31)(r4)
skip_nv_load:
	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

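	/*
	 * The low two bits of the return value from
	 * kvmppc_handle_exit() are the RESUME_FLAG_NV/RESUME_FLAG_HOST
	 * flags tested above; the remaining bits carry the value
	 * (e.g. a -errno) handed back to the caller after the shift.
	 */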
	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */
	PPC_LL	r5, HOST_STACK_LR(r1)

	/*
	 * We already saved guest volatile register state; now save the
	 * non-volatiles.
	 */

	PPC_STL	r15, VCPU_GPR(r15)(r4)
	PPC_STL	r16, VCPU_GPR(r16)(r4)
	PPC_STL	r17, VCPU_GPR(r17)(r4)
	PPC_STL	r18, VCPU_GPR(r18)(r4)
	PPC_STL	r19, VCPU_GPR(r19)(r4)
	PPC_STL	r20, VCPU_GPR(r20)(r4)
	PPC_STL	r21, VCPU_GPR(r21)(r4)
	PPC_STL	r22, VCPU_GPR(r22)(r4)
	PPC_STL	r23, VCPU_GPR(r23)(r4)
	PPC_STL	r24, VCPU_GPR(r24)(r4)
	PPC_STL	r25, VCPU_GPR(r25)(r4)
	PPC_STL	r26, VCPU_GPR(r26)(r4)
	PPC_STL	r27, VCPU_GPR(r27)(r4)
	PPC_STL	r28, VCPU_GPR(r28)(r4)
	PPC_STL	r29, VCPU_GPR(r29)(r4)
	PPC_STL	r30, VCPU_GPR(r30)(r4)
	PPC_STL	r31, VCPU_GPR(r31)(r4)

	/* Load host non-volatile register state from host stack. */
	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
	PPC_LL	r15, HOST_NV_GPR(r15)(r1)
	PPC_LL	r16, HOST_NV_GPR(r16)(r1)
	PPC_LL	r17, HOST_NV_GPR(r17)(r1)
	PPC_LL	r18, HOST_NV_GPR(r18)(r1)
	PPC_LL	r19, HOST_NV_GPR(r19)(r1)
	PPC_LL	r20, HOST_NV_GPR(r20)(r1)
	PPC_LL	r21, HOST_NV_GPR(r21)(r1)
	PPC_LL	r22, HOST_NV_GPR(r22)(r1)
	PPC_LL	r23, HOST_NV_GPR(r23)(r1)
	PPC_LL	r24, HOST_NV_GPR(r24)(r1)
	PPC_LL	r25, HOST_NV_GPR(r25)(r1)
	PPC_LL	r26, HOST_NV_GPR(r26)(r1)
	PPC_LL	r27, HOST_NV_GPR(r27)(r1)
	PPC_LL	r28, HOST_NV_GPR(r28)(r1)
	PPC_LL	r29, HOST_NV_GPR(r29)(r1)
	PPC_LL	r30, HOST_NV_GPR(r30)(r1)
	PPC_LL	r31, HOST_NV_GPR(r31)(r1)

	/* Return to kvm_vcpu_run(). */
	mtlr	r5
	addi	r1, r1, HOST_STACK_SIZE
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
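/*
 * C prototype, as suggested by the register usage above (roughly):
 * int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 */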
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	PPC_STL	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	PPC_STL	r3, HOST_RUN(r1)
	mflr	r3
	PPC_STL	r3, HOST_STACK_LR(r1)

	/* Save host non-volatile register state to stack. */
	PPC_STL	r14, HOST_NV_GPR(r14)(r1)
	PPC_STL	r15, HOST_NV_GPR(r15)(r1)
	PPC_STL	r16, HOST_NV_GPR(r16)(r1)
	PPC_STL	r17, HOST_NV_GPR(r17)(r1)
	PPC_STL	r18, HOST_NV_GPR(r18)(r1)
	PPC_STL	r19, HOST_NV_GPR(r19)(r1)
	PPC_STL	r20, HOST_NV_GPR(r20)(r1)
	PPC_STL	r21, HOST_NV_GPR(r21)(r1)
	PPC_STL	r22, HOST_NV_GPR(r22)(r1)
	PPC_STL	r23, HOST_NV_GPR(r23)(r1)
	PPC_STL	r24, HOST_NV_GPR(r24)(r1)
	PPC_STL	r25, HOST_NV_GPR(r25)(r1)
	PPC_STL	r26, HOST_NV_GPR(r26)(r1)
	PPC_STL	r27, HOST_NV_GPR(r27)(r1)
	PPC_STL	r28, HOST_NV_GPR(r28)(r1)
	PPC_STL	r29, HOST_NV_GPR(r29)(r1)
	PPC_STL	r30, HOST_NV_GPR(r30)(r1)
	PPC_STL	r31, HOST_NV_GPR(r31)(r1)

	/* Load guest non-volatiles. */
	PPC_LL	r14, VCPU_GPR(r14)(r4)
	PPC_LL	r15, VCPU_GPR(r15)(r4)
	PPC_LL	r16, VCPU_GPR(r16)(r4)
	PPC_LL	r17, VCPU_GPR(r17)(r4)
	PPC_LL	r18, VCPU_GPR(r18)(r4)
	PPC_LL	r19, VCPU_GPR(r19)(r4)
	PPC_LL	r20, VCPU_GPR(r20)(r4)
	PPC_LL	r21, VCPU_GPR(r21)(r4)
	PPC_LL	r22, VCPU_GPR(r22)(r4)
	PPC_LL	r23, VCPU_GPR(r23)(r4)
	PPC_LL	r24, VCPU_GPR(r24)(r4)
	PPC_LL	r25, VCPU_GPR(r25)(r4)
	PPC_LL	r26, VCPU_GPR(r26)(r4)
	PPC_LL	r27, VCPU_GPR(r27)(r4)
	PPC_LL	r28, VCPU_GPR(r28)(r4)
	PPC_LL	r29, VCPU_GPR(r29)(r4)
	PPC_LL	r30, VCPU_GPR(r30)(r4)
	PPC_LL	r31, VCPU_GPR(r31)(r4)


lightweight_exit:
	PPC_STL	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r3

	/*
	 * Save the vcpu pointer for the exception handlers; this must
	 * be done before loading guest r2, since SET_VCPU stores
	 * through r2 ('current').
	 */
//	SET_VCPU(r4)
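	/*
	 * The handlers currently locate the vcpu via SPRN_SPRG_THREAD
	 * and GET_VCPU instead, so this store appears redundant and is
	 * left disabled.
	 */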

	PPC_LL	r11, VCPU_SHARED(r4)
	/* Save host mas4 and mas6 and load guest MAS registers */
	mfspr	r3, SPRN_MAS4
	stw	r3, VCPU_HOST_MAS4(r4)
	mfspr	r3, SPRN_MAS6
	stw	r3, VCPU_HOST_MAS6(r4)
	lwz	r3, VCPU_SHARED_MAS0(r11)
	lwz	r5, VCPU_SHARED_MAS1(r11)
#ifndef CONFIG_64BIT
	lwz	r6, (VCPU_SHARED_MAS2 + 4)(r11)
#else
	ld	r6, (VCPU_SHARED_MAS2)(r11)
#endif
	lwz	r7, VCPU_SHARED_MAS7_3+4(r11)
	lwz	r8, VCPU_SHARED_MAS4(r11)
	mtspr	SPRN_MAS0, r3
	mtspr	SPRN_MAS1, r5
	mtspr	SPRN_MAS2, r6
	mtspr	SPRN_MAS3, r7
	mtspr	SPRN_MAS4, r8
	lwz	r3, VCPU_SHARED_MAS6(r11)
	lwz	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r3
	mtspr	SPRN_MAS7, r5
	/* Disable MAS register updates via exception */
	mfspr	r3, SPRN_EPCR
	oris	r3, r3, SPRN_EPCR_DMIUH@h
	mtspr	SPRN_EPCR, r3

	/*
	 * Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values.
	 */
	lwz	r3, VCPU_VRSAVE(r4)
	lwz	r5, VCPU_SHARED_SPRG4(r11)
	mtspr	SPRN_VRSAVE, r3
	lwz	r6, VCPU_SHARED_SPRG5(r11)
	mtspr	SPRN_SPRG4W, r5
	lwz	r7, VCPU_SHARED_SPRG6(r11)
	mtspr	SPRN_SPRG5W, r6
	lwz	r8, VCPU_SHARED_SPRG7(r11)
	mtspr	SPRN_SPRG6W, r7
	mtspr	SPRN_SPRG7W, r8

	/* Load some guest volatiles. */
	PPC_LL	r3, VCPU_LR(r4)
	PPC_LL	r5, VCPU_XER(r4)
	PPC_LL	r6, VCPU_CTR(r4)
	PPC_LL	r7, VCPU_CR(r4)
	PPC_LL	r8, VCPU_PC(r4)
#ifndef CONFIG_64BIT
	lwz	r9, (VCPU_SHARED_MSR + 4)(r11)
#else
	ld	r9, (VCPU_SHARED_MSR)(r11)
#endif
	PPC_LL	r0, VCPU_GPR(r0)(r4)
	PPC_LL	r1, VCPU_GPR(r1)(r4)
	PPC_LL	r2, VCPU_GPR(r2)(r4)
	PPC_LL	r10, VCPU_GPR(r10)(r4)
	PPC_LL	r11, VCPU_GPR(r11)(r4)
	PPC_LL	r12, VCPU_GPR(r12)(r4)
	PPC_LL	r13, VCPU_GPR(r13)(r4)
	mtlr	r3
	mtxer	r5
	mtctr	r6
	mtcr	r7
	mtsrr0	r8
	mtsrr1	r9
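	/*
	 * SRR0/SRR1 now hold the guest PC and MSR; the rfi at the end
	 * of this path switches to them, completing entry into the
	 * guest.
	 */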

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	PPC_STL	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	bne	1b
	PPC_STL	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	PPC_LL	r5, VCPU_GPR(r5)(r4)
	PPC_LL	r6, VCPU_GPR(r6)(r4)
	PPC_LL	r7, VCPU_GPR(r7)(r4)
	PPC_LL	r8, VCPU_GPR(r8)(r4)
	PPC_LL	r9, VCPU_GPR(r9)(r4)

	PPC_LL	r3, VCPU_GPR(r3)(r4)
	PPC_LL	r4, VCPU_GPR(r4)(r4)
	rfi