/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is optimized for size rather than absolute efficiency,
 * with a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */

#include <asm/errno.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"

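/*
 * On exception entry Xen leaves the guest's %rcx and %r11 on top of
 * the hardware-style exception frame.  Reload them and use "ret $16"
 * so the two extra words are dropped on return, leaving a standard
 * frame behind.
 */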
ENTRY(xen_adjust_exception_frame)
        mov 8+0(%rsp), %rcx
        mov 8+8(%rsp), %r11
        ret $16

hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *      ss
 *      rsp
 *      rflags
 *      cs
 *      rip             <-- standard iret frame
 *
 *      flags
 *
 *      rcx             }
 *      r11             }<-- pushed by hypercall page
 * rsp->rax             }
 */
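/*
 * xen_iret just pushes 0 for the "flags" word above and lets the
 * iret hypercall unwind the rest of the frame.  The ENDPATCH()/
 * RELOC() pair (from xen-asm.h) export <sym>_end and <sym>_reloc
 * labels: the end of the sequence that may be copied for inline
 * patching, and the jmp target that needs relocating when it is.
 */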
ENTRY(xen_iret)
        pushq $0
1:      jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)

/*
 * sysexit is not used for 64-bit processes, so it's only ever used to
 * return to 32-bit compat userspace.
 */
ENTRY(xen_sysexit)
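        /*
         * Build the compat return frame from the sysexit convention
         * (%rdx = return RIP, %rcx = user RSP), with only IF set in
         * the saved flags, then hand it to the iret hypercall below.
         */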
        pushq $__USER32_DS
        pushq %rcx
        pushq $X86_EFLAGS_IF
        pushq $__USER32_CS
        pushq %rdx

        pushq $0
1:      jmp hypercall_iret
ENDPATCH(xen_sysexit)
RELOC(xen_sysexit, 1b+1)

ENTRY(xen_sysret64)
        /*
         * We're already on the usermode stack at this point, but
         * still with the kernel gs, so we can easily switch back.
         */
        movq %rsp, PER_CPU_VAR(rsp_scratch)
        movq PER_CPU_VAR(kernel_stack), %rsp

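        /*
         * Rebuild an iret frame from the sysret convention
         * (%rcx = return RIP, %r11 = saved rflags).  VGCF_in_syscall
         * presumably lets Xen take its faster sysret-style exit,
         * since %rcx/%r11 need not be preserved on a syscall return.
         */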
        pushq $__USER_DS
        pushq PER_CPU_VAR(rsp_scratch)
        pushq %r11
        pushq $__USER_CS
        pushq %rcx

        pushq $VGCF_in_syscall
1:      jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)

ENTRY(xen_sysret32)
        /*
         * We're already on the usermode stack at this point, but
         * still with the kernel gs, so we can easily switch back.
         */
        movq %rsp, PER_CPU_VAR(rsp_scratch)
        movq PER_CPU_VAR(kernel_stack), %rsp

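        /*
         * Same idea as xen_sysret64, but with the 32-bit user
         * segments and no VGCF flags, so Xen returns with a full
         * iret.
         */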
        pushq $__USER32_DS
        pushq PER_CPU_VAR(rsp_scratch)
        pushq %r11
        pushq $__USER32_CS
        pushq %rcx

        pushq $0
1:      jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *      ss
 *      rsp
 *      rflags
 *      cs
 *      rip
 *      r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */

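/*
 * Reload %rcx/%r11 from the Xen-saved slots and switch %rsp back to
 * the user stack pointer recorded in the frame, leaving register
 * state as the native entry paths expect.
 */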
.macro undo_xen_syscall
        mov 0*8(%rsp), %rcx
        mov 1*8(%rsp), %r11
        mov 5*8(%rsp), %rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
        undo_xen_syscall
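        /*
         * Kernel gs is already live under Xen, so enter the native
         * path just past its swapgs; it does its own stack switch
         * from the user %rsp we restored above.
         */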
        jmp system_call_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
        undo_xen_syscall
        jmp ia32_cstar_target
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
        undo_xen_syscall
        jmp ia32_sysenter_target
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

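/*
 * Without compat syscall support there is nothing to call into:
 * drop the Xen-pushed %rcx/%r11, fail with -ENOSYS and return
 * straight to userspace via the iret hypercall.
 */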
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
        lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
        pushq $0
        jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif /* CONFIG_IA32_EMULATION */